adding code for bigquery policy tag extractor #398
base: master
@@ -0,0 +1,85 @@
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This script creates a table named optimization_workshop.queries_grouped_by_hash_project,
 * which aggregates all query jobs run within the past 30 days, grouped by
 * their normalized query pattern.
 * 30 days is the default timeframe, but you can change this by setting the
 * num_days_to_scan variable to a different value.
 * The normalized query pattern ignores comments, parameter values, UDFs,
 * and literals in the query text. This allows us to group queries that are
 * logically the same, but have different literals.
 *
 * For example, the following queries would be grouped together:
 * SELECT * FROM `my-project.my_dataset.my_table` WHERE date = '2020-01-01'
 * SELECT * FROM `my-project.my_dataset.my_table` WHERE date = '2020-01-02'
 * SELECT * FROM `my-project.my_dataset.my_table` WHERE date = '2020-01-03'
 */

DECLARE num_days_to_scan INT64 DEFAULT 30;

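-- num_stages_with_perf_insights counts, for a single job, how many stage-level
-- performance insight signals BigQuery reported: slot contention and
-- insufficient shuffle quota across standalone stage insights, plus any
-- stage performance change insights.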
CREATE TEMP FUNCTION num_stages_with_perf_insights(query_info ANY TYPE) AS (
  COALESCE((
    SELECT SUM(IF(i.slot_contention, 1, 0) + IF(i.insufficient_shuffle_quota, 1, 0))
    FROM UNNEST(query_info.performance_insights.stage_performance_standalone_insights) i), 0)
  + COALESCE(ARRAY_LENGTH(query_info.performance_insights.stage_performance_change_insights), 0)
);

CREATE SCHEMA IF NOT EXISTS optimization_workshop;
CREATE OR REPLACE TABLE optimization_workshop.queries_grouped_by_hash_project AS
SELECT
  statement_type,
  query_info.query_hashes.normalized_literals AS query_hash,
  COUNT(DISTINCT DATE(start_time)) AS days_active,
  ARRAY_AGG(DISTINCT project_id IGNORE NULLS) AS project_ids,
  ARRAY_AGG(DISTINCT reservation_id IGNORE NULLS) AS reservation_ids,
  SUM(num_stages_with_perf_insights(query_info)) AS num_stages_with_perf_insights,
  COUNT(DISTINCT (project_id || ':us.' || job_id)) AS job_count,
  ARRAY_AGG(
    STRUCT(
      bqutil.fn.job_url(project_id || ':us.' || parent_job_id) AS parent_job_url,
      bqutil.fn.job_url(project_id || ':us.' || job_id) AS job_url,
      query AS query_text
    )
    ORDER BY total_slot_ms DESC
    LIMIT 10) AS top_10_jobs,
  ARRAY_AGG(DISTINCT user_email) AS user_emails,
  SUM(total_bytes_processed) / POW(1024, 3) AS total_gigabytes_processed,
  AVG(total_bytes_processed) / POW(1024, 3) AS avg_gigabytes_processed,
  SUM(total_slot_ms) / (1000 * 60 * 60) AS total_slot_hours,
  AVG(total_slot_ms) / (1000 * 60 * 60) AS avg_total_slot_hours_per_active_day,
  AVG(TIMESTAMP_DIFF(end_time, start_time, SECOND)) AS avg_job_duration_seconds,
  ARRAY_AGG(DISTINCT FORMAT("%T", labels)) AS labels,
  SUM(total_slot_ms / TIMESTAMP_DIFF(end_time, start_time, MILLISECOND)) AS total_slots,
  AVG(total_slot_ms / TIMESTAMP_DIFF(end_time, start_time, MILLISECOND)) AS avg_total_slots,
  -- All jobs with the same query hash reference the same tables, so ANY_VALUE is safe here.
  ANY_VALUE(ARRAY(
    SELECT
      ref_table.project_id || '.' ||
      IF(STARTS_WITH(ref_table.dataset_id, '_'), 'TEMP', ref_table.dataset_id)
      || '.' || ref_table.table_id
    FROM UNNEST(referenced_tables) ref_table
  )) AS referenced_tables
FROM `region-us`.INFORMATION_SCHEMA.JOBS
WHERE
  DATE(creation_time) >= CURRENT_DATE - num_days_to_scan
  AND state = 'DONE'
  AND error_result IS NULL
  AND job_type = 'QUERY'
  AND statement_type != 'SCRIPT'
GROUP BY statement_type, query_hash;
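
Once this table exists, it can be queried directly to surface the heaviest query patterns. A minimal sketch (the ordering and LIMIT here are illustrative, not part of the script above):

```sql
-- Illustrative follow-up query: the ten query patterns that consumed the
-- most slot hours, plus a link to each pattern's most expensive job.
SELECT
  query_hash,
  job_count,
  total_slot_hours,
  top_10_jobs[SAFE_OFFSET(0)].job_url AS most_expensive_job_url
FROM optimization_workshop.queries_grouped_by_hash_project
ORDER BY total_slot_hours DESC
LIMIT 10;
```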

@@ -0,0 +1,24 @@
# BigQuery Policy Tag Extractor

## Introduction
This directory contains the [policy_tag_export.sh](policy_tag_export.sh) bash script, which extracts BigQuery policy tag information from a given dataset. The script iterates through at most 10,000 tables in a dataset and, for every column with a policy tag, outputs the table name, column name, and policy tag ID in CSV format.
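
For illustration, the output rows take this shape (the table, column, and tag values below are hypothetical):

```
orders,customer_email,projects/my-project/locations/us/taxonomies/123456789/policyTags/987654321
orders,tax_id,projects/my-project/locations/us/taxonomies/123456789/policyTags/555555555
```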

## Instructions for use
The simplest way to execute this script is to run it directly in Cloud Shell, but it can also run as part of a larger CI/CD pipeline or process (see the non-interactive sketch after the steps below).

Before running, either export the `DATASET` environment variable or be ready to enter the dataset name when the script prompts for it.

To execute in Cloud Shell:
1. [Launch a Cloud Shell session](https://cloud.google.com/shell/docs/launching-cloud-shell) in the GCP project where your BigQuery data resides.
   * When Cloud Shell starts, the active project is propagated to your gcloud configuration inside Cloud Shell for immediate use. GOOGLE_CLOUD_PROJECT, the environment variable used by the Application Default Credentials library, is also set to point to the active project. You can also explicitly set the project using `gcloud config set project [PROJECT_ID]`.
1. [Upload](https://cloud.google.com/shell/docs/uploading-and-downloading-files#upload_and_download_files_and_folders) the policy_tag_export.sh script to the Cloud Shell environment.
1. Execute the script by running `bash policy_tag_export.sh`.
1. List the files in Cloud Shell (`ls`) and verify that a file called `policy_tags.csv` was created.
1. [Download](https://cloud.google.com/shell/docs/uploading-and-downloading-files#upload_and_download_files_and_folders) the file.
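
For non-interactive runs, for example inside a CI/CD step, the dataset can be supplied up front so the script never prompts. A minimal sketch, assuming the script sits in the working directory and `my_dataset` is a placeholder:

```bash
# Pre-set DATASET so policy_tag_export.sh skips its interactive prompt.
export DATASET=my_dataset   # hypothetical dataset name
bash policy_tag_export.sh

# Spot-check the generated CSV.
head policy_tags.csv
```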

## Considerations
* Ensure that you (or the service account executing the bash script) have the `bigquery.metadataViewer` role, which grants access to the required level of information.
* Currently, the extractor only handles simple column types. RECORD type columns with nested policy tags are not supported.
* The extractor can identify specific policy tags on columns, but is limited to the information available to the bq command-line tool. In its current state, this is the full policy tag identifier:

      projects/<PROJECT_ID>/locations/<LOCATION>/taxonomies/<TAXONOMY_ID>/policyTags/<TAG_ID>
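
If downstream tooling needs only the taxonomy and tag IDs, the identifier can be split in the shell. A small sketch (field positions assume exactly the format above; the example value is hypothetical):

```bash
# Split a full policy tag identifier into its taxonomy and tag IDs.
TAG="projects/my-project/locations/us/taxonomies/123456789/policyTags/987654321"
TAXONOMY_ID=$(echo "${TAG}" | awk -F/ '{print $6}')   # 6th slash-separated field
POLICY_TAG_ID=$(echo "${TAG}" | awk -F/ '{print $8}') # 8th slash-separated field
echo "taxonomy=${TAXONOMY_ID} tag=${POLICY_TAG_ID}"
```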

@@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prompt the user for the DATASET value if it is not already set.
if [ -z "$DATASET" ]; then
  read -r -p "Enter the BigQuery dataset name: " DATASET
fi

# Write all table names in the dataset (up to 10,000) to a reference TXT file;
# awk keeps only the first column and sed drops bq's two header rows.
bq --format=sparse ls --max_results=10000 "${DATASET}" | awk '{ print $1 }' | sed '1,2d' > table_list.txt

# Loop through each table and export policy tags (if any) to a CSV.
echo "Writing to CSV..."
while IFS= read -r TABLE; do
  TAG_COUNT=$(bq show --schema "${DATASET}.${TABLE}" | grep -c "policyTags")

  if [ "${TAG_COUNT}" -ge 1 ]; then
    # Emit one compact JSON object per tagged column so that tables with
    # several tagged columns produce one CSV row per column.
    bq show --format=prettyjson "${DATASET}.${TABLE}" \
      | jq -c '.schema.fields[] | select(.policyTags | length >= 1)' \
      | while IFS= read -r COLUMN_AND_TAG; do
          COLUMN=$(echo "${COLUMN_AND_TAG}" | jq -r '.name')
          TAG_ID=$(echo "${COLUMN_AND_TAG}" | jq -r '.policyTags.names[]')
          echo "${TABLE},${COLUMN},${TAG_ID}"
        done
  fi
done < table_list.txt >> policy_tags.csv
echo "Done."

> Review comment: This doesn't handle RECORD type columns with nested policy tags. Can you either handle it in code or make an explicit callout in README that this script only handles simple column types.

> Reply: @danieldeleo added a line to the Considerations section of the README calling this out. Will work on updating the code to handle nested tags in the future.