Edit on GitHub

sqlglot.dialects.bigquery — the Google BigQuery SQL dialect (tokenizer, parser, and generator).

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    datestrtodate_sql,
 13    format_time_lambda,
 14    inline_array_sql,
 15    max_or_greatest,
 16    min_or_least,
 17    no_ilike_sql,
 18    parse_date_delta_with_interval,
 19    regexp_replace_sql,
 20    rename_func,
 21    timestrtotime_sql,
 22    ts_or_ds_to_date_sql,
 23)
 24from sqlglot.helper import seq_get, split_num_words
 25from sqlglot.tokens import TokenType
 26
 27logger = logging.getLogger("sqlglot")
 28
 29
 30def _date_add_sql(
 31    data_type: str, kind: str
 32) -> t.Callable[[generator.Generator, exp.Expression], str]:
 33    def func(self, expression):
 34        this = self.sql(expression, "this")
 35        unit = expression.args.get("unit")
 36        unit = exp.var(unit.name.upper() if unit else "DAY")
 37        interval = exp.Interval(this=expression.expression.copy(), unit=unit)
 38        return f"{data_type}_{kind}({this}, {self.sql(interval)})"
 39
 40    return func
 41
 42
 43def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
 44    if not expression.find_ancestor(exp.From, exp.Join):
 45        return self.values_sql(expression)
 46
 47    alias = expression.args.get("alias")
 48
 49    structs = [
 50        exp.Struct(
 51            expressions=[
 52                exp.alias_(value, column_name)
 53                for value, column_name in zip(
 54                    t.expressions,
 55                    alias.columns
 56                    if alias and alias.columns
 57                    else (f"_c{i}" for i in range(len(t.expressions))),
 58                )
 59            ]
 60        )
 61        for t in expression.find_all(exp.Tuple)
 62    ]
 63
 64    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 65
 66
 67def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
 68    this = expression.this
 69    if isinstance(this, exp.Schema):
 70        this = f"{this.this} <{self.expressions(this)}>"
 71    else:
 72        this = self.sql(this)
 73    return f"RETURNS {this}"
 74
 75
 76def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
 77    kind = expression.args["kind"]
 78    returns = expression.find(exp.ReturnsProperty)
 79
 80    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 81        expression = expression.copy()
 82        expression.set("kind", "TABLE FUNCTION")
 83
 84        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 85            expression.set("expression", expression.expression.this)
 86
 87        return self.create_sql(expression)
 88
 89    return self.create_sql(expression)
 90
 91
 92def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 93    """Remove references to unnest table aliases since bigquery doesn't allow them.
 94
 95    These are added by the optimizer's qualify_column step.
 96    """
 97    from sqlglot.optimizer.scope import find_all_in_scope
 98
 99    if isinstance(expression, exp.Select):
100        unnest_aliases = {
101            unnest.alias
102            for unnest in find_all_in_scope(expression, exp.Unnest)
103            if isinstance(unnest.parent, (exp.From, exp.Join))
104        }
105        if unnest_aliases:
106            for column in expression.find_all(exp.Column):
107                if column.table in unnest_aliases:
108                    column.set("table", None)
109                elif column.db in unnest_aliases:
110                    column.set("db", None)
111
112    return expression
113
114
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their select-list aliases.

    Workaround for https://issuetracker.google.com/issues/162294746 — a BigQuery bug
    when grouping by an expression and then ordering, e.g.:

        WITH x AS (SELECT 1 y)
        SELECT y + 1 z
        FROM x
        GROUP BY x + 1
        ORDER BY z
    """
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            # Map each aliased projection's expression to its alias identifier.
            aliased = {
                projection.this: projection.args["alias"]
                for projection in expression.selects
                if isinstance(projection, exp.Alias)
            }

            for grouped in group.expressions:
                alias = aliased.get(grouped)
                if alias:
                    grouped.replace(exp.column(alias))

    return expression
141
142
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            # SELECT * hides the projection list, so there is nothing to attach names to.
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        # Drop the alias column list in place; the names move onto the projections below.
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            # Keep a handle on the original projection node so it can be replaced,
            # even after `select` is rebound to the aliased expression's inner node.
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
168
169
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Parse PARSE_TIMESTAMP(fmt, value[, zone]) into StrToTime(value, fmt[, zone])."""
    build = format_time_lambda(exp.StrToTime, "bigquery")
    node = build([seq_get(args, 1), seq_get(args, 0)])
    node.set("zone", seq_get(args, 2))
    return node
174
175
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(y, m, d) parses as DateFromParts; any other arity parses as Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
179
180
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
185
186
class BigQuery(Dialect):
    """Dialect definition for Google BigQuery: tokenizer, parser and generator settings."""

    # UNNEST produces a single (unnamed) column rather than a table with columns.
    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # strftime-style tokens specific to this dialect.
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    # Oracle-style format elements -> strftime equivalents.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            # Climb through dotted references to reach the node that owns them.
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # Lowercase only when the identifier is not a UDF name, not a
            # db-qualified table, and not explicitly marked as a table.
            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B'...' prefixed strings are byte literals.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R'...' prefixed strings are raw strings (no escape processing).
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            # BigQuery's TIMESTAMP type is timezone-aware.
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        # DIV is a function in BigQuery, not an operator keyword.
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        # LOG(base, value) argument order; bare LOG defaults to natural log.
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        SUPPORTS_USER_DEFINED_TYPES = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # Single-group patterns extract that group rather than the whole match.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(<subquery>) takes a full statement as its argument.
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM uses plain function-call syntax in BigQuery.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, merging dash-separated pieces into one identifier."""
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume `-<token>` sequences (BigQuery allows dashes in table names).
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, splitting a single dotted identifier
            (e.g. `project.dataset.table`) into catalog/db/name parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # Anything beyond three parts stays attached to the table name as a Dot chain.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False

        # Expression-type -> SQL rendering overrides for BigQuery.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.SHA2: lambda self, e: self.func(
                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        # sqlglot data types -> BigQuery type names.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            # Naive timestamps map to DATETIME; tz-aware ones map to TIMESTAMP.
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Render AT TIME ZONE, converting via DATETIME unless inside a CAST(.. AS STRING)."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            # TRY_CAST is spelled SAFE_CAST in BigQuery.
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            # Column lists should have been pushed down by _pushdown_cte_column_names;
            # if any remain, flag them as unsupported.
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """Render ARRAY(<subquery>) for subqueries, bracketed literals otherwise."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # `x IN UNNEST(arr)` needs no extra parentheses around the UNNEST.
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            # BigQuery only supports EXCEPT DISTINCT.
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            # BigQuery only supports INTERSECT DISTINCT.
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            # BigQuery spells WITH-style properties as OPTIONS (...).
            return self.properties(properties, prefix=self.seg("OPTIONS"))
logger = logging.getLogger("sqlglot")  # module-level logger (default level WARNING)
class BigQuery(sqlglot.dialects.dialect.Dialect):
188class BigQuery(Dialect):
189    UNNEST_COLUMN_ONLY = True
190
191    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
192    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
193
194    # bigquery udfs are case sensitive
195    NORMALIZE_FUNCTIONS = False
196
197    TIME_MAPPING = {
198        "%D": "%m/%d/%y",
199    }
200
201    FORMAT_MAPPING = {
202        "DD": "%d",
203        "MM": "%m",
204        "MON": "%b",
205        "MONTH": "%B",
206        "YYYY": "%Y",
207        "YY": "%y",
208        "HH": "%I",
209        "HH12": "%I",
210        "HH24": "%H",
211        "MI": "%M",
212        "SS": "%S",
213        "SSSSS": "%f",
214        "TZH": "%z",
215    }
216
217    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
218    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
219    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
220
221    @classmethod
222    def normalize_identifier(cls, expression: E) -> E:
223        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
224        # The following check is essentially a heuristic to detect tables based on whether or
225        # not they're qualified.
226        if isinstance(expression, exp.Identifier):
227            parent = expression.parent
228
229            while isinstance(parent, exp.Dot):
230                parent = parent.parent
231
232            if (
233                not isinstance(parent, exp.UserDefinedFunction)
234                and not (isinstance(parent, exp.Table) and parent.db)
235                and not expression.meta.get("is_table")
236            ):
237                expression.set("this", expression.this.lower())
238
239        return expression
240
241    class Tokenizer(tokens.Tokenizer):
242        QUOTES = ["'", '"', '"""', "'''"]
243        COMMENTS = ["--", "#", ("/*", "*/")]
244        IDENTIFIERS = ["`"]
245        STRING_ESCAPES = ["\\"]
246
247        HEX_STRINGS = [("0x", ""), ("0X", "")]
248
249        BYTE_STRINGS = [
250            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
251        ]
252
253        RAW_STRINGS = [
254            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
255        ]
256
257        KEYWORDS = {
258            **tokens.Tokenizer.KEYWORDS,
259            "ANY TYPE": TokenType.VARIANT,
260            "BEGIN": TokenType.COMMAND,
261            "BEGIN TRANSACTION": TokenType.BEGIN,
262            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
263            "BYTES": TokenType.BINARY,
264            "DECLARE": TokenType.COMMAND,
265            "FLOAT64": TokenType.DOUBLE,
266            "INT64": TokenType.BIGINT,
267            "RECORD": TokenType.STRUCT,
268            "TIMESTAMP": TokenType.TIMESTAMPTZ,
269            "NOT DETERMINISTIC": TokenType.VOLATILE,
270            "UNKNOWN": TokenType.NULL,
271        }
272        KEYWORDS.pop("DIV")
273
274    class Parser(parser.Parser):
275        PREFIXED_PIVOT_COLUMNS = True
276
277        LOG_BASE_FIRST = False
278        LOG_DEFAULTS_TO_LN = True
279
280        SUPPORTS_USER_DEFINED_TYPES = False
281
282        FUNCTIONS = {
283            **parser.Parser.FUNCTIONS,
284            "DATE": _parse_date,
285            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
286            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
287            "DATE_TRUNC": lambda args: exp.DateTrunc(
288                unit=exp.Literal.string(str(seq_get(args, 1))),
289                this=seq_get(args, 0),
290            ),
291            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
292            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
293            "DIV": binary_from_function(exp.IntDiv),
294            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
295            "MD5": exp.MD5Digest.from_arg_list,
296            "TO_HEX": _parse_to_hex,
297            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
298                [seq_get(args, 1), seq_get(args, 0)]
299            ),
300            "PARSE_TIMESTAMP": _parse_timestamp,
301            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
302            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
303                this=seq_get(args, 0),
304                expression=seq_get(args, 1),
305                position=seq_get(args, 2),
306                occurrence=seq_get(args, 3),
307                group=exp.Literal.number(1)
308                if re.compile(str(seq_get(args, 1))).groups == 1
309                else None,
310            ),
311            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
312            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
313            "SPLIT": lambda args: exp.Split(
314                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
315                this=seq_get(args, 0),
316                expression=seq_get(args, 1) or exp.Literal.string(","),
317            ),
318            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
319            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
320            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
321            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
322            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
323        }
324
325        FUNCTION_PARSERS = {
326            **parser.Parser.FUNCTION_PARSERS,
327            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
328        }
329        FUNCTION_PARSERS.pop("TRIM")
330
331        NO_PAREN_FUNCTIONS = {
332            **parser.Parser.NO_PAREN_FUNCTIONS,
333            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
334        }
335
336        NESTED_TYPE_TOKENS = {
337            *parser.Parser.NESTED_TYPE_TOKENS,
338            TokenType.TABLE,
339        }
340
341        ID_VAR_TOKENS = {
342            *parser.Parser.ID_VAR_TOKENS,
343            TokenType.VALUES,
344        }
345
346        PROPERTY_PARSERS = {
347            **parser.Parser.PROPERTY_PARSERS,
348            "NOT DETERMINISTIC": lambda self: self.expression(
349                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
350            ),
351            "OPTIONS": lambda self: self._parse_with_property(),
352        }
353
354        CONSTRAINT_PARSERS = {
355            **parser.Parser.CONSTRAINT_PARSERS,
356            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
357        }
358
359        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
360            this = super()._parse_table_part(schema=schema)
361
362            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
363            if isinstance(this, exp.Identifier):
364                table_name = this.name
365                while self._match(TokenType.DASH, advance=False) and self._next:
366                    self._advance(2)
367                    table_name += f"-{self._prev.text}"
368
369                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
370
371            return this
372
373        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
374            table = super()._parse_table_parts(schema=schema)
375            if isinstance(table.this, exp.Identifier) and "." in table.name:
376                catalog, db, this, *rest = (
377                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
378                    for x in split_num_words(table.name, ".", 3)
379                )
380
381                if rest and this:
382                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
383
384                table = exp.Table(this=this, db=db, catalog=catalog)
385
386            return table
387
388    class Generator(generator.Generator):
389        EXPLICIT_UNION = True
390        INTERVAL_ALLOWS_PLURAL_FORM = False
391        JOIN_HINTS = False
392        QUERY_HINTS = False
393        TABLE_HINTS = False
394        LIMIT_FETCH = "LIMIT"
395        RENAME_TABLE_WITH_DB = False
396        ESCAPE_LINE_BREAK = True
397        NVL2_SUPPORTED = False
398
399        TRANSFORMS = {
400            **generator.Generator.TRANSFORMS,
401            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
402            exp.ArraySize: rename_func("ARRAY_LENGTH"),
403            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
404            exp.Create: _create_sql,
405            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
406            exp.DateAdd: _date_add_sql("DATE", "ADD"),
407            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
408            exp.DateFromParts: rename_func("DATE"),
409            exp.DateStrToDate: datestrtodate_sql,
410            exp.DateSub: _date_add_sql("DATE", "SUB"),
411            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
412            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
413            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
414            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
415            exp.GroupConcat: rename_func("STRING_AGG"),
416            exp.Hex: rename_func("TO_HEX"),
417            exp.ILike: no_ilike_sql,
418            exp.IntDiv: rename_func("DIV"),
419            exp.JSONFormat: rename_func("TO_JSON_STRING"),
420            exp.Max: max_or_greatest,
421            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
422            exp.MD5Digest: rename_func("MD5"),
423            exp.Min: min_or_least,
424            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
425            exp.RegexpExtract: lambda self, e: self.func(
426                "REGEXP_EXTRACT",
427                e.this,
428                e.expression,
429                e.args.get("position"),
430                e.args.get("occurrence"),
431            ),
432            exp.RegexpReplace: regexp_replace_sql,
433            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
434            exp.ReturnsProperty: _returnsproperty_sql,
435            exp.Select: transforms.preprocess(
436                [
437                    transforms.explode_to_unnest,
438                    _unqualify_unnest,
439                    transforms.eliminate_distinct_on,
440                    _alias_ordered_group,
441                ]
442            ),
443            exp.SHA2: lambda self, e: self.func(
444                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
445            ),
446            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
447            if e.name == "IMMUTABLE"
448            else "NOT DETERMINISTIC",
449            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
450            exp.StrToTime: lambda self, e: self.func(
451                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
452            ),
453            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
454            exp.TimeSub: _date_add_sql("TIME", "SUB"),
455            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
456            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
457            exp.TimeStrToTime: timestrtotime_sql,
458            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
459            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
460            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
461            exp.Unhex: rename_func("FROM_HEX"),
462            exp.Values: _derived_table_values_to_unnest,
463            exp.VariancePop: rename_func("VAR_POP"),
464        }
465
466        TYPE_MAPPING = {
467            **generator.Generator.TYPE_MAPPING,
468            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
469            exp.DataType.Type.BIGINT: "INT64",
470            exp.DataType.Type.BINARY: "BYTES",
471            exp.DataType.Type.BOOLEAN: "BOOL",
472            exp.DataType.Type.CHAR: "STRING",
473            exp.DataType.Type.DECIMAL: "NUMERIC",
474            exp.DataType.Type.DOUBLE: "FLOAT64",
475            exp.DataType.Type.FLOAT: "FLOAT64",
476            exp.DataType.Type.INT: "INT64",
477            exp.DataType.Type.NCHAR: "STRING",
478            exp.DataType.Type.NVARCHAR: "STRING",
479            exp.DataType.Type.SMALLINT: "INT64",
480            exp.DataType.Type.TEXT: "STRING",
481            exp.DataType.Type.TIMESTAMP: "DATETIME",
482            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
483            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
484            exp.DataType.Type.TINYINT: "INT64",
485            exp.DataType.Type.VARBINARY: "BYTES",
486            exp.DataType.Type.VARCHAR: "STRING",
487            exp.DataType.Type.VARIANT: "ANY TYPE",
488        }
489
490        PROPERTIES_LOCATION = {
491            **generator.Generator.PROPERTIES_LOCATION,
492            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
493            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
494        }
495
496        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
497        RESERVED_KEYWORDS = {
498            *generator.Generator.RESERVED_KEYWORDS,
499            "all",
500            "and",
501            "any",
502            "array",
503            "as",
504            "asc",
505            "assert_rows_modified",
506            "at",
507            "between",
508            "by",
509            "case",
510            "cast",
511            "collate",
512            "contains",
513            "create",
514            "cross",
515            "cube",
516            "current",
517            "default",
518            "define",
519            "desc",
520            "distinct",
521            "else",
522            "end",
523            "enum",
524            "escape",
525            "except",
526            "exclude",
527            "exists",
528            "extract",
529            "false",
530            "fetch",
531            "following",
532            "for",
533            "from",
534            "full",
535            "group",
536            "grouping",
537            "groups",
538            "hash",
539            "having",
540            "if",
541            "ignore",
542            "in",
543            "inner",
544            "intersect",
545            "interval",
546            "into",
547            "is",
548            "join",
549            "lateral",
550            "left",
551            "like",
552            "limit",
553            "lookup",
554            "merge",
555            "natural",
556            "new",
557            "no",
558            "not",
559            "null",
560            "nulls",
561            "of",
562            "on",
563            "or",
564            "order",
565            "outer",
566            "over",
567            "partition",
568            "preceding",
569            "proto",
570            "qualify",
571            "range",
572            "recursive",
573            "respect",
574            "right",
575            "rollup",
576            "rows",
577            "select",
578            "set",
579            "some",
580            "struct",
581            "tablesample",
582            "then",
583            "to",
584            "treat",
585            "true",
586            "unbounded",
587            "union",
588            "unnest",
589            "using",
590            "when",
591            "where",
592            "window",
593            "with",
594            "within",
595        }
596
597        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
598            parent = expression.parent
599
600            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
601            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
602            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
603                return self.func(
604                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
605                )
606
607            return super().attimezone_sql(expression)
608
609        def trycast_sql(self, expression: exp.TryCast) -> str:
610            return self.cast_sql(expression, safe_prefix="SAFE_")
611
612        def cte_sql(self, expression: exp.CTE) -> str:
613            if expression.alias_column_names:
614                self.unsupported("Column names in CTE definition are not supported.")
615            return super().cte_sql(expression)
616
617        def array_sql(self, expression: exp.Array) -> str:
618            first_arg = seq_get(expression.expressions, 0)
619            if isinstance(first_arg, exp.Subqueryable):
620                return f"ARRAY{self.wrap(self.sql(first_arg))}"
621
622            return inline_array_sql(self, expression)
623
624        def transaction_sql(self, *_) -> str:
625            return "BEGIN TRANSACTION"
626
627        def commit_sql(self, *_) -> str:
628            return "COMMIT TRANSACTION"
629
630        def rollback_sql(self, *_) -> str:
631            return "ROLLBACK TRANSACTION"
632
633        def in_unnest_op(self, expression: exp.Unnest) -> str:
634            return self.sql(expression)
635
636        def except_op(self, expression: exp.Except) -> str:
637            if not expression.args.get("distinct", False):
638                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
639            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
640
641        def intersect_op(self, expression: exp.Intersect) -> str:
642            if not expression.args.get("distinct", False):
643                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
644            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
645
646        def with_properties(self, properties: exp.Properties) -> str:
647            return self.properties(properties, prefix=self.seg("OPTIONS"))
UNNEST_COLUMN_ONLY = True
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONTIME', '_PARTITIONDATE'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
221    @classmethod
222    def normalize_identifier(cls, expression: E) -> E:
223        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
224        # The following check is essentially a heuristic to detect tables based on whether or
225        # not they're qualified.
226        if isinstance(expression, exp.Identifier):
227            parent = expression.parent
228
229            while isinstance(parent, exp.Dot):
230                parent = parent.parent
231
232            if (
233                not isinstance(parent, exp.UserDefinedFunction)
234                and not (isinstance(parent, exp.Table) and parent.db)
235                and not expression.meta.get("is_table")
236            ):
237                expression.set("this", expression.this.lower())
238
239        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of whether they are quoted.

tokenizer_class = <class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
generator_class = <class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
241    class Tokenizer(tokens.Tokenizer):
242        QUOTES = ["'", '"', '"""', "'''"]
243        COMMENTS = ["--", "#", ("/*", "*/")]
244        IDENTIFIERS = ["`"]
245        STRING_ESCAPES = ["\\"]
246
247        HEX_STRINGS = [("0x", ""), ("0X", "")]
248
249        BYTE_STRINGS = [
250            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
251        ]
252
253        RAW_STRINGS = [
254            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
255        ]
256
257        KEYWORDS = {
258            **tokens.Tokenizer.KEYWORDS,
259            "ANY TYPE": TokenType.VARIANT,
260            "BEGIN": TokenType.COMMAND,
261            "BEGIN TRANSACTION": TokenType.BEGIN,
262            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
263            "BYTES": TokenType.BINARY,
264            "DECLARE": TokenType.COMMAND,
265            "FLOAT64": TokenType.DOUBLE,
266            "INT64": TokenType.BIGINT,
267            "RECORD": TokenType.STRUCT,
268            "TIMESTAMP": TokenType.TIMESTAMPTZ,
269            "NOT DETERMINISTIC": TokenType.VOLATILE,
270            "UNKNOWN": TokenType.NULL,
271        }
272        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 
'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': 
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': 
<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>}
class BigQuery.Parser(sqlglot.parser.Parser):
274    class Parser(parser.Parser):
275        PREFIXED_PIVOT_COLUMNS = True
276
277        LOG_BASE_FIRST = False
278        LOG_DEFAULTS_TO_LN = True
279
280        SUPPORTS_USER_DEFINED_TYPES = False
281
282        FUNCTIONS = {
283            **parser.Parser.FUNCTIONS,
284            "DATE": _parse_date,
285            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
286            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
287            "DATE_TRUNC": lambda args: exp.DateTrunc(
288                unit=exp.Literal.string(str(seq_get(args, 1))),
289                this=seq_get(args, 0),
290            ),
291            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
292            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
293            "DIV": binary_from_function(exp.IntDiv),
294            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
295            "MD5": exp.MD5Digest.from_arg_list,
296            "TO_HEX": _parse_to_hex,
297            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
298                [seq_get(args, 1), seq_get(args, 0)]
299            ),
300            "PARSE_TIMESTAMP": _parse_timestamp,
301            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
302            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
303                this=seq_get(args, 0),
304                expression=seq_get(args, 1),
305                position=seq_get(args, 2),
306                occurrence=seq_get(args, 3),
307                group=exp.Literal.number(1)
308                if re.compile(str(seq_get(args, 1))).groups == 1
309                else None,
310            ),
311            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
312            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
313            "SPLIT": lambda args: exp.Split(
314                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
315                this=seq_get(args, 0),
316                expression=seq_get(args, 1) or exp.Literal.string(","),
317            ),
318            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
319            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
320            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
321            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
322            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
323        }
324
325        FUNCTION_PARSERS = {
326            **parser.Parser.FUNCTION_PARSERS,
327            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
328        }
329        FUNCTION_PARSERS.pop("TRIM")
330
331        NO_PAREN_FUNCTIONS = {
332            **parser.Parser.NO_PAREN_FUNCTIONS,
333            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
334        }
335
336        NESTED_TYPE_TOKENS = {
337            *parser.Parser.NESTED_TYPE_TOKENS,
338            TokenType.TABLE,
339        }
340
341        ID_VAR_TOKENS = {
342            *parser.Parser.ID_VAR_TOKENS,
343            TokenType.VALUES,
344        }
345
346        PROPERTY_PARSERS = {
347            **parser.Parser.PROPERTY_PARSERS,
348            "NOT DETERMINISTIC": lambda self: self.expression(
349                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
350            ),
351            "OPTIONS": lambda self: self._parse_with_property(),
352        }
353
354        CONSTRAINT_PARSERS = {
355            **parser.Parser.CONSTRAINT_PARSERS,
356            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
357        }
358
359        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
360            this = super()._parse_table_part(schema=schema)
361
362            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
363            if isinstance(this, exp.Identifier):
364                table_name = this.name
365                while self._match(TokenType.DASH, advance=False) and self._next:
366                    self._advance(2)
367                    table_name += f"-{self._prev.text}"
368
369                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
370
371            return this
372
373        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
374            table = super()._parse_table_parts(schema=schema)
375            if isinstance(table.this, exp.Identifier) and "." in table.name:
376                catalog, db, this, *rest = (
377                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
378                    for x in split_num_words(table.name, ".", 3)
379                )
380
381                if rest and this:
382                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
383
384                table = exp.Table(this=this, db=db, catalog=catalog)
385
386            return table

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
SUPPORTS_USER_DEFINED_TYPES = False
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.MAP: 'MAP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NULLABLE: 'NULLABLE'>}
ID_VAR_TOKENS = {<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.INDEX: 'INDEX'>, <TokenType.BINARY: 'BINARY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT: 'INT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SEMI: 'SEMI'>, <TokenType.BIT: 'BIT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DESC: 'DESC'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CASE: 'CASE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NULL: 'NULL'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ALL: 'ALL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.END: 'END'>, <TokenType.TRUE: 'TRUE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.XML: 'XML'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.VIEW: 'VIEW'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.UINT: 'UINT'>, <TokenType.FULL: 
'FULL'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SOME: 'SOME'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TEXT: 'TEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.UUID: 'UUID'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.JSON: 'JSON'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.IS: 'IS'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ANY: 'ANY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.ASC: 'ASC'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DATE: 'DATE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TOP: 'TOP'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.MAP: 'MAP'>, 
<TokenType.VOLATILE: 'VOLATILE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SET: 'SET'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.INT128: 'INT128'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.INET: 'INET'>, <TokenType.VALUES: 'VALUES'>, <TokenType.VAR: 'VAR'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.KEEP: 'KEEP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INT256: 'INT256'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
UNNEST_COLUMN_ONLY: bool = True
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
388    class Generator(generator.Generator):
389        EXPLICIT_UNION = True
390        INTERVAL_ALLOWS_PLURAL_FORM = False
391        JOIN_HINTS = False
392        QUERY_HINTS = False
393        TABLE_HINTS = False
394        LIMIT_FETCH = "LIMIT"
395        RENAME_TABLE_WITH_DB = False
396        ESCAPE_LINE_BREAK = True
397        NVL2_SUPPORTED = False
398
399        TRANSFORMS = {
400            **generator.Generator.TRANSFORMS,
401            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
402            exp.ArraySize: rename_func("ARRAY_LENGTH"),
403            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
404            exp.Create: _create_sql,
405            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
406            exp.DateAdd: _date_add_sql("DATE", "ADD"),
407            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
408            exp.DateFromParts: rename_func("DATE"),
409            exp.DateStrToDate: datestrtodate_sql,
410            exp.DateSub: _date_add_sql("DATE", "SUB"),
411            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
412            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
413            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
414            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
415            exp.GroupConcat: rename_func("STRING_AGG"),
416            exp.Hex: rename_func("TO_HEX"),
417            exp.ILike: no_ilike_sql,
418            exp.IntDiv: rename_func("DIV"),
419            exp.JSONFormat: rename_func("TO_JSON_STRING"),
420            exp.Max: max_or_greatest,
421            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
422            exp.MD5Digest: rename_func("MD5"),
423            exp.Min: min_or_least,
424            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
425            exp.RegexpExtract: lambda self, e: self.func(
426                "REGEXP_EXTRACT",
427                e.this,
428                e.expression,
429                e.args.get("position"),
430                e.args.get("occurrence"),
431            ),
432            exp.RegexpReplace: regexp_replace_sql,
433            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
434            exp.ReturnsProperty: _returnsproperty_sql,
435            exp.Select: transforms.preprocess(
436                [
437                    transforms.explode_to_unnest,
438                    _unqualify_unnest,
439                    transforms.eliminate_distinct_on,
440                    _alias_ordered_group,
441                ]
442            ),
443            exp.SHA2: lambda self, e: self.func(
444                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
445            ),
446            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
447            if e.name == "IMMUTABLE"
448            else "NOT DETERMINISTIC",
449            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
450            exp.StrToTime: lambda self, e: self.func(
451                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
452            ),
453            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
454            exp.TimeSub: _date_add_sql("TIME", "SUB"),
455            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
456            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
457            exp.TimeStrToTime: timestrtotime_sql,
458            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
459            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
460            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
461            exp.Unhex: rename_func("FROM_HEX"),
462            exp.Values: _derived_table_values_to_unnest,
463            exp.VariancePop: rename_func("VAR_POP"),
464        }
465
466        TYPE_MAPPING = {
467            **generator.Generator.TYPE_MAPPING,
468            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
469            exp.DataType.Type.BIGINT: "INT64",
470            exp.DataType.Type.BINARY: "BYTES",
471            exp.DataType.Type.BOOLEAN: "BOOL",
472            exp.DataType.Type.CHAR: "STRING",
473            exp.DataType.Type.DECIMAL: "NUMERIC",
474            exp.DataType.Type.DOUBLE: "FLOAT64",
475            exp.DataType.Type.FLOAT: "FLOAT64",
476            exp.DataType.Type.INT: "INT64",
477            exp.DataType.Type.NCHAR: "STRING",
478            exp.DataType.Type.NVARCHAR: "STRING",
479            exp.DataType.Type.SMALLINT: "INT64",
480            exp.DataType.Type.TEXT: "STRING",
481            exp.DataType.Type.TIMESTAMP: "DATETIME",
482            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
483            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
484            exp.DataType.Type.TINYINT: "INT64",
485            exp.DataType.Type.VARBINARY: "BYTES",
486            exp.DataType.Type.VARCHAR: "STRING",
487            exp.DataType.Type.VARIANT: "ANY TYPE",
488        }
489
490        PROPERTIES_LOCATION = {
491            **generator.Generator.PROPERTIES_LOCATION,
492            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
493            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
494        }
495
496        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
497        RESERVED_KEYWORDS = {
498            *generator.Generator.RESERVED_KEYWORDS,
499            "all",
500            "and",
501            "any",
502            "array",
503            "as",
504            "asc",
505            "assert_rows_modified",
506            "at",
507            "between",
508            "by",
509            "case",
510            "cast",
511            "collate",
512            "contains",
513            "create",
514            "cross",
515            "cube",
516            "current",
517            "default",
518            "define",
519            "desc",
520            "distinct",
521            "else",
522            "end",
523            "enum",
524            "escape",
525            "except",
526            "exclude",
527            "exists",
528            "extract",
529            "false",
530            "fetch",
531            "following",
532            "for",
533            "from",
534            "full",
535            "group",
536            "grouping",
537            "groups",
538            "hash",
539            "having",
540            "if",
541            "ignore",
542            "in",
543            "inner",
544            "intersect",
545            "interval",
546            "into",
547            "is",
548            "join",
549            "lateral",
550            "left",
551            "like",
552            "limit",
553            "lookup",
554            "merge",
555            "natural",
556            "new",
557            "no",
558            "not",
559            "null",
560            "nulls",
561            "of",
562            "on",
563            "or",
564            "order",
565            "outer",
566            "over",
567            "partition",
568            "preceding",
569            "proto",
570            "qualify",
571            "range",
572            "recursive",
573            "respect",
574            "right",
575            "rollup",
576            "rows",
577            "select",
578            "set",
579            "some",
580            "struct",
581            "tablesample",
582            "then",
583            "to",
584            "treat",
585            "true",
586            "unbounded",
587            "union",
588            "unnest",
589            "using",
590            "when",
591            "where",
592            "window",
593            "with",
594            "within",
595        }
596
597        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
598            parent = expression.parent
599
600            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
601            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
602            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
603                return self.func(
604                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
605                )
606
607            return super().attimezone_sql(expression)
608
609        def trycast_sql(self, expression: exp.TryCast) -> str:
610            return self.cast_sql(expression, safe_prefix="SAFE_")
611
612        def cte_sql(self, expression: exp.CTE) -> str:
613            if expression.alias_column_names:
614                self.unsupported("Column names in CTE definition are not supported.")
615            return super().cte_sql(expression)
616
617        def array_sql(self, expression: exp.Array) -> str:
618            first_arg = seq_get(expression.expressions, 0)
619            if isinstance(first_arg, exp.Subqueryable):
620                return f"ARRAY{self.wrap(self.sql(first_arg))}"
621
622            return inline_array_sql(self, expression)
623
624        def transaction_sql(self, *_) -> str:
625            return "BEGIN TRANSACTION"
626
627        def commit_sql(self, *_) -> str:
628            return "COMMIT TRANSACTION"
629
630        def rollback_sql(self, *_) -> str:
631            return "ROLLBACK TRANSACTION"
632
633        def in_unnest_op(self, expression: exp.Unnest) -> str:
634            return self.sql(expression)
635
636        def except_op(self, expression: exp.Except) -> str:
637            if not expression.args.get("distinct", False):
638                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
639            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
640
641        def intersect_op(self, expression: exp.Intersect) -> str:
642            if not expression.args.get("distinct", False):
643                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
644            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
645
646        def with_properties(self, properties: exp.Properties) -> str:
647            return self.properties(properties, prefix=self.seg("OPTIONS"))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where the dialect mandates it. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma in select expressions is leading or trailing. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
NVL2_SUPPORTED = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalDayToSecondSpan'>: 'DAY TO SECOND', <class 'sqlglot.expressions.IntervalYearToMonthSpan'>: 'YEAR TO MONTH', <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function 
BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function 
_date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'on', 'not', 'define', 'right', 'or', 'following', 'rollup', 'current', 'array', 'recursive', 'collate', 'create', 'where', 'is', 'distinct', 'outer', 'for', 'qualify', 'over', 'groups', 'by', 'limit', 'contains', 'interval', 'true', 'default', 'grouping', 'to', 'preceding', 'any', 'of', 'struct', 'within', 'union', 'full', 'exists', 'inner', 'ignore', 'treat', 'case', 'asc', 'cast', 'null', 'between', 'unbounded', 'as', 'if', 'cross', 'escape', 'order', 'no', 'tablesample', 'end', 'enum', 'some', 'set', 'then', 'respect', 'nulls', 'proto', 'false', 'in', 'using', 'intersect', 'new', 'lookup', 'natural', 'group', 'rows', 'window', 'partition', 'desc', 'range', 'join', 'having', 'like', 'cube', 'all', 'else', 'merge', 'from', 'assert_rows_modified', 'extract', 'and', 'lateral', 'unnest', 'when', 'exclude', 'with', 'into', 'fetch', 'at', 'select', 'hash', 'except', 'left'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
597        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
598            parent = expression.parent
599
600            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
601            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
602            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
603                return self.func(
604                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
605                )
606
607            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
609        def trycast_sql(self, expression: exp.TryCast) -> str:
610            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
612        def cte_sql(self, expression: exp.CTE) -> str:
613            if expression.alias_column_names:
614                self.unsupported("Column names in CTE definition are not supported.")
615            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
617        def array_sql(self, expression: exp.Array) -> str:
618            first_arg = seq_get(expression.expressions, 0)
619            if isinstance(first_arg, exp.Subqueryable):
620                return f"ARRAY{self.wrap(self.sql(first_arg))}"
621
622            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
624        def transaction_sql(self, *_) -> str:
625            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
627        def commit_sql(self, *_) -> str:
628            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
630        def rollback_sql(self, *_) -> str:
631            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
633        def in_unnest_op(self, expression: exp.Unnest) -> str:
634            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
636        def except_op(self, expression: exp.Except) -> str:
637            if not expression.args.get("distinct", False):
638                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
639            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
641        def intersect_op(self, expression: exp.Intersect) -> str:
642            if not expression.args.get("distinct", False):
643                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
644            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
646        def with_properties(self, properties: exp.Properties) -> str:
647            return self.properties(properties, prefix=self.seg("OPTIONS"))
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
256    @classmethod
257    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
258        """Checks if text can be identified given an identify option.
259
260        Args:
261            text: The text to check.
262            identify:
263                "always" or `True`: Always returns true.
264                "safe": True if the identifier is case-insensitive.
265
266        Returns:
267            Whether or not the given text can be identified.
268        """
269        if identify is True or identify == "always":
270            return True
271
272        if identify == "safe":
273            return not cls.case_sensitive(text)
274
275        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:

Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql