Airflow BigQueryOperator: how to save query result in a partitioned table?

I have a simple DAG:
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.operators.dummy_operator import DummyOperator

with DAG(dag_id='my_dags.my_dag') as dag:
    start = DummyOperator(task_id='start')
    end = DummyOperator(task_id='end')

    sql = """
    SELECT *
    FROM `another_dataset.another_table`
    """

    bq_query = BigQueryOperator(bql=sql,
                                destination_dataset_table='my_dataset.my_table20180524',
                                task_id='bq_query',
                                bigquery_conn_id='my_bq_connection',
                                use_legacy_sql=False,
                                write_disposition='WRITE_TRUNCATE',
                                create_disposition='CREATE_IF_NEEDED',
                                query_params={})

    start >> bq_query >> end
When executing the bq_query task, the SQL query gets saved in a sharded table. I want it to get saved in a daily partitioned table instead. To do so, I only changed destination_dataset_table to my_dataset.my_table$20180524. I got the error below when executing the bq_query task:
Partitioning specification must be provided in order to create partitioned table
How can I tell BigQuery to save the query result to a daily partitioned table? My first guess was to use query_params in BigQueryOperator, but I didn't find any example of how to use that parameter.
EDIT:
I'm using the google-cloud==0.27.0 Python client ... and it's the one used in prod :(

You first need to create an empty partitioned destination table. Follow the instructions here: link to create an empty partitioned table, and then run the Airflow pipeline below again.
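For reference, a minimal sketch of creating such an empty day-partitioned table with a recent google-cloud-bigquery client (newer than the 0.27.0 version mentioned in the question; the project name and schema below are placeholders) could look like this:

from google.cloud import bigquery

client = bigquery.Client()

# placeholder schema; replace with the real columns of another_dataset.another_table
schema = [bigquery.SchemaField("some_column", "STRING")]

table = bigquery.Table("my_project.my_dataset.my_table", schema=schema)
# make it a daily ingestion-time partitioned table
table.time_partitioning = bigquery.TimePartitioning(
    type_=bigquery.TimePartitioningType.DAY
)
client.create_table(table)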
Then you can try the following DAG code:
import datetime

from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.operators.dummy_operator import DummyOperator

today_date = datetime.datetime.now().strftime("%Y%m%d")
table_name = 'my_dataset.my_table' + '$' + today_date

with DAG(dag_id='my_dags.my_dag') as dag:
    start = DummyOperator(task_id='start')
    end = DummyOperator(task_id='end')

    sql = """
    SELECT *
    FROM `another_dataset.another_table`
    """

    bq_query = BigQueryOperator(bql=sql,
                                destination_dataset_table='{{ params.t_name }}',
                                task_id='bq_query',
                                bigquery_conn_id='my_bq_connection',
                                use_legacy_sql=False,
                                write_disposition='WRITE_TRUNCATE',
                                create_disposition='CREATE_IF_NEEDED',
                                params={'t_name': table_name},
                                dag=dag)

    start >> bq_query >> end
So what I did was create a dynamic table name variable and pass it to the BigQuery operator.

The main issue here is that I don't have access to the newer version of the Google Cloud Python API; prod is using version 0.27.0.
So, to get the job done, I did something bad and dirty:
saved the query result in a sharded table, let it be table_sharded
got table_sharded's schema, let it be table_schema
saved a "SELECT * FROM dataset.table_sharded" query to a partitioned table, providing table_schema
All of this is abstracted in one single operator that uses a hook. The hook is responsible for creating/deleting tables/partitions, getting table schemas and running queries on BigQuery.
Have a look at the code. If there is any other solution, please let me know.
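For illustration only, a rough sketch of those steps using Airflow's contrib BigQueryHook might look like the following. The function name is hypothetical, and methods such as run_query, get_schema and run_table_delete exist on the hook's cursor in the 1.x contrib API, but exact signatures vary between Airflow versions, so treat this as a sketch rather than the actual operator described above:

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

def save_to_partition(sql, sharded_table, partitioned_table, conn_id='my_bq_connection'):
    """Sketch of the workaround: sharded_table is e.g. 'my_dataset.table_sharded',
    partitioned_table is e.g. 'my_dataset.my_table$20180524'."""
    hook = BigQueryHook(bigquery_conn_id=conn_id, use_legacy_sql=False)
    cursor = hook.get_conn().cursor()

    # 1. save the query result in a sharded (staging) table
    cursor.run_query(bql=sql,
                     destination_dataset_table=sharded_table,
                     write_disposition='WRITE_TRUNCATE')

    # 2. read the staging table's schema; it can be used to pre-create the
    #    partitioned destination table with the right columns
    dataset_id, table_id = sharded_table.split('.')
    table_schema = cursor.get_schema(dataset_id, table_id)['fields']

    # 3. re-select the staging table into the partitioned destination
    cursor.run_query(bql='SELECT * FROM `{}`'.format(sharded_table),
                     destination_dataset_table=partitioned_table,
                     write_disposition='WRITE_TRUNCATE')

    # 4. clean up the staging table
    cursor.run_table_delete(deletion_dataset_table=sharded_table)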

Using BigQueryOperator you can pass the time_partitioning parameter, which will create ingestion-time partitioned tables:
bq_cmd = BigQueryOperator(
    task_id="task_id",
    sql=[query],
    destination_dataset_table=destination_tbl,
    use_legacy_sql=False,
    write_disposition='WRITE_TRUNCATE',
    time_partitioning={'type': 'DAY'},
    allow_large_results=True,
    trigger_rule='all_success',
    query_params=query_params,
    dag=dag
)
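Tying this back to the original question, a hedged sketch of writing into one daily partition of an ingestion-time partitioned table could then look like this (table and connection names are taken from the question):

bq_query = BigQueryOperator(
    task_id='bq_query',
    bql=sql,
    destination_dataset_table='my_dataset.my_table$20180524',  # partition decorator
    bigquery_conn_id='my_bq_connection',
    use_legacy_sql=False,
    write_disposition='WRITE_TRUNCATE',
    create_disposition='CREATE_IF_NEEDED',
    time_partitioning={'type': 'DAY'},  # partitioning spec, so the table can be created if needed
    dag=dag
)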

from datetime import datetime,timedelta
from airflow import DAG
from airflow.models import Variable
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.operators.dummy_operator import DummyOperator
DEFAULT_DAG_ARGS = {
    'owner': 'airflow',
    'depends_on_past': False,
    'retries': 2,
    'retry_delay': timedelta(minutes=10),
    'project_id': Variable.get('gcp_project'),
    'zone': Variable.get('gce_zone'),
    'region': Variable.get('gce_region'),
    'location': Variable.get('gce_zone'),
}

with DAG(
        'test',
        start_date=datetime(2019, 1, 1),
        schedule_interval=None,
        catchup=False,
        default_args=DEFAULT_DAG_ARGS) as dag:

    bq_query = BigQueryOperator(
        task_id='create-partition',
        bql="""SELECT
                 *
               FROM
                 `dataset.table_name`""",  # table from which you want to pull data
        # auto-partitioned table in BQ (partition decorator appended to the table name)
        destination_dataset_table='project.dataset.table_name' + '$' + datetime.now().strftime('%Y%m%d'),
        write_disposition='WRITE_TRUNCATE',
        create_disposition='CREATE_IF_NEEDED',
        use_legacy_sql=False,
    )
I recommend using Airflow Variables for these fields and referencing them in the DAG.
With the code above, a partition for today's date will be added to the BigQuery table.


How can I get the results of a query in bigquery into a list?

I'm running a DAG that runs multiple stored procedures in BigQuery in each DAG run. Currently, my code is the following:
sp_names = [
    'sp_airflow_test_1',
    'sp_airflow_test_2'
]

# Define DAG
with DAG(
        dag_id,
        default_args=default_args) as dag:

    task_array = []
    i = 0
    for sp in sp_names:
        i = i + 1
        task_array.append(
            BigQueryOperator(
                task_id='run_{}'.format(sp),
                sql="""CALL `[project].[dataset].{}`();""".format(sp),
                use_legacy_sql=False
            )
        )
        if i > 1:
            task_array[i - 2] >> task_array[i - 1]
I'd like my list sp_names to be the result of a query against a one-column table stored in my BQ dataset, instead of being hardcoded like it is right now.
How can I do this?
Thanks in advance.
To execute multiple BigQuery tasks with a similar SQL structure, create the BigQueryOperator instances dynamically with a create_dynamic_task function:
# function to create a task
def create_dynamic_task(sp):
    task = BigQueryOperator(
        task_id='run_{}'.format(sp),
        sql="""CALL `[project].[dataset].{}`();""".format(sp),
        use_legacy_sql=False
    )
    return task

# dynamically create tasks
task_list = []
for sp in sp_names:
    task_list.append(create_dynamic_task(sp))
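As for filling sp_names from a one-column table instead of hardcoding it, one option is to query it at DAG-parse time. A minimal sketch using the google-cloud-bigquery client; the project, dataset, table and column names below are placeholders:

from google.cloud import bigquery

def get_sp_names():
    # query the one-column table holding the stored-procedure names
    client = bigquery.Client()
    rows = client.query("SELECT sp_name FROM `project.dataset.sp_list`").result()
    return [row.sp_name for row in rows]

sp_names = get_sp_names()  # then loop over it as shown above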

Right way to implement pandas.read_sql with ClickHouse

I'm trying to use the pandas.read_sql function.
I created a ClickHouse table and filled it:
create table regions
(
    date DateTime Default now(),
    region String
)
engine = MergeTree()
PARTITION BY toYYYYMM(date)
ORDER BY tuple()
SETTINGS index_granularity = 8192;

insert into regions (region) values ('Asia'), ('Europe');
Then the Python code:
import pandas as pd
from sqlalchemy import create_engine

uri = 'clickhouse://default:@localhost/default'
engine = create_engine(uri)
query = 'select * from regions'
pd.read_sql(query, engine)
As a result I expected to get a dataframe with columns date and region, but all I get is an empty dataframe:
Empty DataFrame
Columns: [2021-01-08 09:24:33, Asia]
Index: []
UPD: It turned out that specifying clickhouse+native solves the problem.
Can it be solved without +native?
There is an ancient issue: https://github.com/xzkostyan/clickhouse-sqlalchemy/issues/10. It also contains a hint suggesting that you add FORMAT TabSeparatedWithNamesAndTypes at the end of the query. So the initial query would look like this:
select *
from regions
FORMAT TabSeparatedWithNamesAndTypes
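For completeness, the clickhouse+native workaround mentioned in the update would look roughly like this; it routes the query through the native TCP protocol instead of HTTP, and the connection details are the ones from the question:

import pandas as pd
from sqlalchemy import create_engine

# same credentials as above, but using the native protocol dialect
engine = create_engine('clickhouse+native://default:@localhost/default')
df = pd.read_sql('select * from regions', engine)
print(df)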

Is it possible to change the delimiter of AWS athena output file

Here is my sample code where I create a file in an S3 bucket using AWS Athena. The file is in CSV format by default. Is there a way to change it to a pipe delimiter?
import json
import boto3

def lambda_handler(event, context):
    s3 = boto3.client('s3')
    client = boto3.client('athena')

    # Start Query Execution
    response = client.start_query_execution(
        QueryString="""
            select * from srvgrp
            where category_code = 'ACOMNCDU'
        """,
        QueryExecutionContext={
            'Database': 'tmp_db'
        },
        ResultConfiguration={
            'OutputLocation': 's3://tmp-results/athena/'
        }
    )

    queryId = response['QueryExecutionId']
    print('Query id is :' + str(queryId))
There is a way to do that with a CTAS query.
BUT:
This is a hacky way and not what CTAS queries are supposed to be used for, since it will also create a new table definition in the AWS Glue Data Catalog.
I'm not sure about the performance.
CREATE TABLE "UNIQUE_PREFIX__new_table"
WITH (
    format = 'TEXTFILE',
    external_location = 's3://tmp-results/athena/__SOMETHING_UNIQUE__',
    field_delimiter = '|',
    bucketed_by = ARRAY['__SOME_COLUMN__'],
    bucket_count = 1
) AS
SELECT *
FROM srvgrp
WHERE category_code = 'ACOMNCDU'
Note:
It is important to set bucket_count = 1, otherwise Athena will create multiple files.
The name of the table in CREATE TABLE ... should also be unique, e.g. use a timestamp prefix/suffix which you can inject at Python runtime.
The external location should be unique too, e.g. use a timestamp prefix/suffix injected at Python runtime. I would advise embedding the table name into the S3 path.
You need to include only one of the columns from the SELECT in bucketed_by.
At some point you will need to clean up the AWS Glue Data Catalog of all the table definitions that were created this way.
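Putting that together with the Lambda from the question, a hedged sketch of injecting the unique table name and location at runtime could look like this (the "pipe_export_" prefix is a placeholder):

import time
import boto3

def lambda_handler(event, context):
    client = boto3.client('athena')
    suffix = time.strftime('%Y%m%d%H%M%S')  # unique per invocation

    ctas = """
        CREATE TABLE "pipe_export_{suffix}"
        WITH (
            format = 'TEXTFILE',
            external_location = 's3://tmp-results/athena/pipe_export_{suffix}/',
            field_delimiter = '|',
            bucketed_by = ARRAY['category_code'],
            bucket_count = 1
        ) AS
        SELECT * FROM srvgrp WHERE category_code = 'ACOMNCDU'
    """.format(suffix=suffix)

    response = client.start_query_execution(
        QueryString=ctas,
        QueryExecutionContext={'Database': 'tmp_db'},
        ResultConfiguration={'OutputLocation': 's3://tmp-results/athena/'}
    )
    return response['QueryExecutionId']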

Can BigQuery API overwrite existing table/view with create_table() (tables insert)?

I'm using the Python client create_table() function which calls the underlying tables insert API. There is an exists_ok parameter but this causes the function to simply ignore the create if the table already exists. The problem with this is that when creating a view, I would like to overwrite the existing view SQL if it's already there. What I'm currently doing to get around this is:
if overwrite:
    bq_client.delete_table(view, not_found_ok=True)
view = bq_client.create_table(view)
What I don't like about this is that there are potentially several seconds during which the view no longer exists. And if the code dies for whatever reason after the delete but before the create, then the view is effectively gone.
My question: is there a way to create a table (view) such that it overwrites any existing object? Or perhaps I have to detect this situation and run some kind of update_table() (patch)?
If you want to overwrite an existing table, you can use the google.cloud.bigquery.job.WriteDisposition class; please refer to the official documentation.
You have three possibilities here: WRITE_APPEND, WRITE_EMPTY and WRITE_TRUNCATE. What you should use is WRITE_TRUNCATE, which overwrites the table data.
You can see the following example:
from google.cloud import bigquery
import pandas

client = bigquery.Client()
table_id = "<YOUR_PROJECT>.<YOUR_DATASET>.<YOUR_TABLE_NAME>"

records = [
    {"artist": u"Michael Jackson", "birth_year": 1958},
    {"artist": u"Madonna", "birth_year": 1958},
    {"artist": u"Shakira", "birth_year": 1977},
    {"artist": u"Taylor Swift", "birth_year": 1989},
]
dataframe = pandas.DataFrame(
    records,
    columns=["artist", "birth_year"],
    index=pandas.Index(
        [u"Q2831", u"Q1744", u"Q34424", u"Q26876"], name="wikidata_id"
    ),
)
job_config = bigquery.LoadJobConfig(
    schema=[
        bigquery.SchemaField("artist", bigquery.enums.SqlTypeNames.STRING),
        bigquery.SchemaField("wikidata_id", bigquery.enums.SqlTypeNames.STRING),
    ],
    write_disposition="WRITE_TRUNCATE",
)

job = client.load_table_from_dataframe(
    dataframe, table_id, job_config=job_config
)
job.result()

table = client.get_table(table_id)
Let me know if it suits your needs. I hope it helps.
UPDATED:
You can use the following Python code to update a table view using the client library:
client = bigquery.Client(project="projectName")
table_ref = client.dataset('datasetName').table('tableViewName')
table = client.get_table(table_ref)
table.view_query = "SELECT * FROM `projectName.dataset.sourceTableName`"
table = client.update_table(table, ['view_query'])
You can do it this way.
Hope this helps!
from google.cloud import bigquery
from google.cloud.exceptions import NotFound

clientBQ = bigquery.Client()

def tableExists(tableID, client=clientBQ):
    """
    Check if a table already exists using the tableID.
    return: (Boolean)
    """
    try:
        client.get_table(tableID)
        return True
    except NotFound:
        return False

if tableExists(viewID, client=clientBQ):
    print("View already exists, deleting the view ... ")
    clientBQ.delete_table(viewID)

view = bigquery.Table(viewID)
view.view_query = "SELECT * FROM `PROJECT_ID.DATASET_NAME.TABLE_NAME`"
clientBQ.create_table(view)
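As a hedged alternative that avoids the delete/create gap the question worries about, you could also issue a CREATE OR REPLACE VIEW DDL statement through the client, replacing the view definition in a single operation (the project, dataset and table names below are placeholders):

from google.cloud import bigquery

client = bigquery.Client()
client.query(
    "CREATE OR REPLACE VIEW `project.dataset.my_view` AS "
    "SELECT * FROM `project.dataset.source_table`"
).result()  # wait for the DDL job to finish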

pull xcom in BigQueryOperator

I'm trying to run a BigQueryOperator with some dynamic parameters based on a previous task, using XCom (I managed to push the value using a BashOperator with xcom_push=True).
I thought using the following would do the trick:
def get_next_run_date(**context):
    last_date = context['task_instance'].xcom_pull(task_ids=['get_autoplay_last_run_date'])[0].rstrip()
    last_date = datetime.strptime(last_date, "%Y%m%d").date()
    return last_date + timedelta(days=1)

t3 = BigQueryOperator(
    task_id='autoplay_calc',
    bql='autoplay_calc.sql',
    params={
        "env": deployment,
        "region": region,
        "partition_start_date": get_next_run_date()
    },
    bigquery_conn_id='gcp_conn',
    use_legacy_sql=False,
    write_disposition='WRITE_APPEND',
    allow_large_results=True,
    # provide_context=True,
    destination_dataset_table=reporting_project + '.pa_reporting_public_batch.autoplay_calc',
    dag=dag
)
But using the above gives me a Broken DAG error with a 'task_instance' error.
Have you tried using context['ti'].xcom_pull()?
You are using it in the wrong way.
You cannot use XCom in params; you need to use it in the bql/sql parameter. Your SQL file, autoplay_calc.sql, can contain something like:
select * from XYZ where date = "{{ task_instance.xcom_pull(task_ids=['get_autoplay_last_run_date'])[0].rstrip() }}"
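A minimal sketch of what that looks like in the DAG from the question, with the templated SQL inlined for illustration (the table name in the query is a placeholder, and deployment/region params are no longer needed for the date):

autoplay_sql = """
    SELECT *
    FROM `some_project.some_dataset.some_table`
    WHERE date = "{{ task_instance.xcom_pull(task_ids=['get_autoplay_last_run_date'])[0].rstrip() }}"
"""

t3 = BigQueryOperator(
    task_id='autoplay_calc',
    bql=autoplay_sql,  # bql/sql is a templated field, so the Jinja expression is rendered at run time
    bigquery_conn_id='gcp_conn',
    use_legacy_sql=False,
    write_disposition='WRITE_APPEND',
    allow_large_results=True,
    destination_dataset_table=reporting_project + '.pa_reporting_public_batch.autoplay_calc',
    dag=dag,
)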