From 51a7b5a1982eaaa681e7b846bc037b55ef7feeab Mon Sep 17 00:00:00 2001 From: Yiping Xiong Date: Fri, 15 Sep 2023 10:46:46 +0800 Subject: [PATCH 1/9] TigerGraph Impl --- pom.xml | 1 + tigergraph/README.md | 90 ++ tigergraph/benchmark.properties | 93 ++ tigergraph/create_validation.properties | 93 ++ tigergraph/pom.xml | 56 + tigergraph/queries/tcr1.gsql | 46 + tigergraph/queries/tcr10.gsql | 35 + tigergraph/queries/tcr11.gsql | 38 + tigergraph/queries/tcr12.gsql | 34 + tigergraph/queries/tcr2.gsql | 59 + tigergraph/queries/tcr3.gsql | 33 + tigergraph/queries/tcr4.gsql | 59 + tigergraph/queries/tcr5.gsql | 61 + tigergraph/queries/tcr6.gsql | 34 + tigergraph/queries/tcr7.gsql | 33 + tigergraph/queries/tcr8.gsql | 57 + tigergraph/queries/tcr9.gsql | 48 + tigergraph/queries/trw1.gsql | 42 + tigergraph/queries/trw2.gsql | 43 + tigergraph/queries/trw3.gsql | 73 ++ tigergraph/queries/tsr1.gsql | 11 + tigergraph/queries/tsr2.gsql | 37 + tigergraph/queries/tsr3.gsql | 18 + tigergraph/queries/tsr4.gsql | 18 + tigergraph/queries/tsr5.gsql | 29 + tigergraph/queries/tsr6.gsql | 26 + tigergraph/queries/tw1.gsql | 8 + tigergraph/queries/tw10.gsql | 8 + tigergraph/queries/tw11.gsql | 7 + tigergraph/queries/tw12.gsql | 7 + tigergraph/queries/tw13.gsql | 9 + tigergraph/queries/tw14.gsql | 9 + tigergraph/queries/tw15.gsql | 9 + tigergraph/queries/tw16.gsql | 8 + tigergraph/queries/tw17.gsql | 19 + tigergraph/queries/tw18.gsql | 6 + tigergraph/queries/tw19.gsql | 6 + tigergraph/queries/tw2.gsql | 8 + tigergraph/queries/tw3.gsql | 8 + tigergraph/queries/tw4.gsql | 11 + tigergraph/queries/tw5.gsql | 11 + tigergraph/queries/tw6.gsql | 11 + tigergraph/queries/tw7.gsql | 5 + tigergraph/queries/tw8.gsql | 4 + tigergraph/queries/tw9.gsql | 7 + tigergraph/run.sh | 2 + tigergraph/scripts/ExprFunctions.hpp | 143 ++ tigergraph/scripts/TokenBank.cpp | 64 + tigergraph/scripts/create_queries.sh | 11 + tigergraph/scripts/install_queries.sh | 10 + tigergraph/scripts/load_data.sh | 27 + tigergraph/scripts/one_step_env.sh | 17 + tigergraph/scripts/setup_schema.sh | 108 ++ .../TigerGraphDbConnectionState.java | 71 + .../tigergraph/TigerGraphTransactionDb.java | 1163 +++++++++++++++++ tigergraph/validate_database.properties | 93 ++ 56 files changed, 3037 insertions(+) create mode 100644 tigergraph/README.md create mode 100644 tigergraph/benchmark.properties create mode 100644 tigergraph/create_validation.properties create mode 100644 tigergraph/pom.xml create mode 100644 tigergraph/queries/tcr1.gsql create mode 100644 tigergraph/queries/tcr10.gsql create mode 100644 tigergraph/queries/tcr11.gsql create mode 100644 tigergraph/queries/tcr12.gsql create mode 100644 tigergraph/queries/tcr2.gsql create mode 100644 tigergraph/queries/tcr3.gsql create mode 100644 tigergraph/queries/tcr4.gsql create mode 100644 tigergraph/queries/tcr5.gsql create mode 100644 tigergraph/queries/tcr6.gsql create mode 100644 tigergraph/queries/tcr7.gsql create mode 100644 tigergraph/queries/tcr8.gsql create mode 100644 tigergraph/queries/tcr9.gsql create mode 100644 tigergraph/queries/trw1.gsql create mode 100644 tigergraph/queries/trw2.gsql create mode 100644 tigergraph/queries/trw3.gsql create mode 100644 tigergraph/queries/tsr1.gsql create mode 100644 tigergraph/queries/tsr2.gsql create mode 100644 tigergraph/queries/tsr3.gsql create mode 100644 tigergraph/queries/tsr4.gsql create mode 100644 tigergraph/queries/tsr5.gsql create mode 100644 tigergraph/queries/tsr6.gsql create mode 100644 tigergraph/queries/tw1.gsql create mode 100644 
tigergraph/queries/tw10.gsql create mode 100644 tigergraph/queries/tw11.gsql create mode 100644 tigergraph/queries/tw12.gsql create mode 100644 tigergraph/queries/tw13.gsql create mode 100644 tigergraph/queries/tw14.gsql create mode 100644 tigergraph/queries/tw15.gsql create mode 100644 tigergraph/queries/tw16.gsql create mode 100644 tigergraph/queries/tw17.gsql create mode 100644 tigergraph/queries/tw18.gsql create mode 100644 tigergraph/queries/tw19.gsql create mode 100644 tigergraph/queries/tw2.gsql create mode 100644 tigergraph/queries/tw3.gsql create mode 100644 tigergraph/queries/tw4.gsql create mode 100644 tigergraph/queries/tw5.gsql create mode 100644 tigergraph/queries/tw6.gsql create mode 100644 tigergraph/queries/tw7.gsql create mode 100644 tigergraph/queries/tw8.gsql create mode 100644 tigergraph/queries/tw9.gsql create mode 100644 tigergraph/run.sh create mode 100644 tigergraph/scripts/ExprFunctions.hpp create mode 100644 tigergraph/scripts/TokenBank.cpp create mode 100644 tigergraph/scripts/create_queries.sh create mode 100644 tigergraph/scripts/install_queries.sh create mode 100644 tigergraph/scripts/load_data.sh create mode 100644 tigergraph/scripts/one_step_env.sh create mode 100644 tigergraph/scripts/setup_schema.sh create mode 100644 tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java create mode 100644 tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java create mode 100644 tigergraph/validate_database.properties diff --git a/pom.xml b/pom.xml index 1e6a0ba..fda115d 100644 --- a/pom.xml +++ b/pom.xml @@ -26,6 +26,7 @@ tugraph galaxybase-cypher ultipa + tigergraph diff --git a/tigergraph/README.md b/tigergraph/README.md new file mode 100644 index 0000000..e8efb15 --- /dev/null +++ b/tigergraph/README.md @@ -0,0 +1,90 @@ +# TigerGraph LDBC Financial Implementation + +- [1. Preparation](#1-preparation) + - [1.1 Install TigerGraph](#11-install-tigergraph) + - [1.2 Download & Install Dependencies](#12-download--install-dependencies) +- [2. DDL & Loading & Queries](#2-ddl--loading--queries) + - [2.1 Create schema and loading job](#21-create-schema-and-loading-job) + - [2.2 Load data](#22-load-data) + - [2.2.1 Data directory structure](#221-data-directory-structure) + - [2.2.2 Data Loading](#222-data-loading) + - [2.3 Create and install queries](#23-create-and-install-queries) + - [2.4 One-step setup](#24-one-step-setup) +- [3. Validate](#3-validate) + - [3.1 Create validation](#31-create-validation) + - [3.2 Validate](#32-validate) + +--- + +## 1. Preparation + +Before using the project, follow these steps to prepare your environment. + +### 1.1 Install TigerGraph + +For detailed installation instructions, please refer to the official documentation: [TigerGraph Getting Started Guide](https://docs.tigergraph.com/tigergraph-server/current/getting-started/linux) + +### 1.2 Download & Install Dependencies + +To install the project dependencies, perform the following actions: + +```bash +cd ~ +git clone https://github.com/ldbc/ldbc_finbench_transaction_impls.git +cd ~/ldbc_finbench_transaction_impls +mvn clean package +``` + +## 2. DDL & Loading & Queries +Please make sure that TigerGraph is already installed and that you are logged in as the user that can run TigerGraph.
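+Before moving on, a quick sanity check can save time (a minimal sketch, assuming a default single-node installation and that the Maven build from step 1.2 has completed):
+
+```bash
+# all TigerGraph services should report that they are up
+gadmin status
+
+# the benchmark driver jar referenced by run.sh should exist
+ls ~/ldbc_finbench_transaction_impls/tigergraph/target/tigergraph-0.1.0-alpha.jar
+```
+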
+### 2.1 Create schema and loading job +```bash +bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/setup_schema.sh +``` + +### 2.2 load data +#### 2.2.1 Data directory structure +```bash +$ tree -L 2 ~/ldbc_finbench_transaction_impls/tigergraph/data +├── sf1 +│ ├── deletes +│ ├── incremental +│ ├── inserts +│ ├── raw +│ ├── read_params +│ └── snapshot +└── sf10 + ├── incremental + ├── raw + ├── read_params + └── snapshot +``` + +#### 2.2.2 Data Loading +```bash +bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/load_data.sh +``` + +### 2.3 Create and install queries +```bash +bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/create_queries.sh +bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/install_queries.sh +``` + +### 2.4 one-step setup +```bash +bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/one_step_env.sh +``` + +## 3. Validate +### 3.1 Create validation +```shell +cd ~/ldbc_finbench_transaction_impls/tigergraph +bash run.sh create_validation.properties +``` + +### 3.2 Validate +```shell +cd ~/ldbc_finbench_transaction_impls/tigergraph +bash run.sh validate_database.properties +``` \ No newline at end of file diff --git a/tigergraph/benchmark.properties b/tigergraph/benchmark.properties new file mode 100644 index 0000000..ac9d6ec --- /dev/null +++ b/tigergraph/benchmark.properties @@ -0,0 +1,93 @@ +############################################################ +# SUT defined configurations # +############################################################ +ipAddr=34.41.176.245 +port=14240 +user=tigergraph +pass=tigergraph +graph=ldbc_fin + +############################################################ +# Driver configurations # +############################################################ +status=1 +thread_count=16 +name=LDBC-FinBench +# Modes available: 1.CREATE_VALIDATION 2.VALIDATE_DATABASE 3.EXECUTE_BENCHMARK +mode=EXECUTE_BENCHMARK +results_log=true +time_unit=MICROSECONDS +time_compression_ratio=0.1 +peer_identifiers= +workload_statistics=false +spinner_wait_duration=1 +help=false +ignore_scheduled_start_times=false +workload=org.ldbcouncil.finbench.driver.workloads.transaction.LdbcFinBenchTransactionWorkload +db=org.ldbcouncil.finbench.impls.tigergraph.TigerGraphTransactionDb +operation_count=1000000 +validation_parameters_size=1000 +validate_workload=true +validate_database=validation_params.csv +warmup=1000 +ldbc.finbench.transaction.queries.parameters_dir=data/sf10/read_params +ldbc.finbench.transaction.queries.updates_dir=data/sf10/incremental +# param and update files suffix, `csv` or `parquet`, default is `csv` +ldbc.finbench.transaction.queries.files_suffix=csv +ldbc.finbench.transaction.queries.simple_read_dissipation=0.2 +ldbc.finbench.transaction.queries.update_interleave=500 +ldbc.finbench.transaction.queries.scale_factor=1 +# Frequency of complex read queries +ldbc.finbench.transaction.queries.ComplexRead1_freq=10 +ldbc.finbench.transaction.queries.ComplexRead2_freq=45 +ldbc.finbench.transaction.queries.ComplexRead3_freq=23 +ldbc.finbench.transaction.queries.ComplexRead4_freq=57 +ldbc.finbench.transaction.queries.ComplexRead5_freq=83 +ldbc.finbench.transaction.queries.ComplexRead6_freq=21 +ldbc.finbench.transaction.queries.ComplexRead7_freq=36 +ldbc.finbench.transaction.queries.ComplexRead8_freq=51 +ldbc.finbench.transaction.queries.ComplexRead9_freq=21 +ldbc.finbench.transaction.queries.ComplexRead10_freq=6 +ldbc.finbench.transaction.queries.ComplexRead11_freq=12 +ldbc.finbench.transaction.queries.ComplexRead12_freq=34 +# For 
debugging purposes +ldbc.finbench.transaction.queries.ComplexRead1_enable=true +ldbc.finbench.transaction.queries.ComplexRead2_enable=true +ldbc.finbench.transaction.queries.ComplexRead3_enable=true +ldbc.finbench.transaction.queries.ComplexRead4_enable=true +ldbc.finbench.transaction.queries.ComplexRead5_enable=true +ldbc.finbench.transaction.queries.ComplexRead6_enable=true +ldbc.finbench.transaction.queries.ComplexRead7_enable=true +ldbc.finbench.transaction.queries.ComplexRead8_enable=true +ldbc.finbench.transaction.queries.ComplexRead9_enable=true +ldbc.finbench.transaction.queries.ComplexRead10_enable=true +ldbc.finbench.transaction.queries.ComplexRead11_enable=true +ldbc.finbench.transaction.queries.ComplexRead12_enable=true +ldbc.finbench.transaction.queries.SimpleRead1_enable=true +ldbc.finbench.transaction.queries.SimpleRead2_enable=true +ldbc.finbench.transaction.queries.SimpleRead3_enable=true +ldbc.finbench.transaction.queries.SimpleRead4_enable=true +ldbc.finbench.transaction.queries.SimpleRead5_enable=true +ldbc.finbench.transaction.queries.SimpleRead6_enable=true +ldbc.finbench.transaction.queries.Write1_enable=true +ldbc.finbench.transaction.queries.Write2_enable=true +ldbc.finbench.transaction.queries.Write3_enable=true +ldbc.finbench.transaction.queries.Write4_enable=true +ldbc.finbench.transaction.queries.Write5_enable=true +ldbc.finbench.transaction.queries.Write6_enable=true +ldbc.finbench.transaction.queries.Write7_enable=true +ldbc.finbench.transaction.queries.Write8_enable=true +ldbc.finbench.transaction.queries.Write9_enable=true +ldbc.finbench.transaction.queries.Write10_enable=true +ldbc.finbench.transaction.queries.Write11_enable=true +ldbc.finbench.transaction.queries.Write12_enable=true +ldbc.finbench.transaction.queries.Write13_enable=true +ldbc.finbench.transaction.queries.Write14_enable=true +ldbc.finbench.transaction.queries.Write15_enable=true +ldbc.finbench.transaction.queries.Write16_enable=true +ldbc.finbench.transaction.queries.Write17_enable=true +ldbc.finbench.transaction.queries.Write18_enable=true +ldbc.finbench.transaction.queries.Write19_enable=true +ldbc.finbench.transaction.queries.ReadWrite1_enable=true +ldbc.finbench.transaction.queries.ReadWrite2_enable=true +ldbc.finbench.transaction.queries.ReadWrite3_enable=true \ No newline at end of file diff --git a/tigergraph/create_validation.properties b/tigergraph/create_validation.properties new file mode 100644 index 0000000..913cc9b --- /dev/null +++ b/tigergraph/create_validation.properties @@ -0,0 +1,93 @@ +############################################################ +# SUT defined configurations # +############################################################ +ipAddr=34.41.176.245 +port=14240 +user=tigergraph +pass=tigergraph +graph=ldbc_fin + +############################################################ +# Driver configurations # +############################################################ +status=1 +thread_count=16 +name=LDBC-FinBench +# Modes available: 1.CREATE_VALIDATION 2.VALIDATE_DATABASE 3.EXECUTE_BENCHMARK +mode=CREATE_VALIDATION +results_log=false +time_unit=MICROSECONDS +time_compression_ratio=0.001 +peer_identifiers= +workload_statistics=false +spinner_wait_duration=1 +help=false +ignore_scheduled_start_times=false +workload=org.ldbcouncil.finbench.driver.workloads.transaction.LdbcFinBenchTransactionWorkload +db=org.ldbcouncil.finbench.impls.tigergraph.TigerGraphTransactionDb +operation_count=1000000 +validation_parameters_size=1000 +validate_workload=true 
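+# validation parameters file, shared by the CREATE_VALIDATION and VALIDATE_DATABASE modes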
+validate_database=validation_params.csv +warmup=0 +ldbc.finbench.transaction.queries.parameters_dir=data/sf1/read_params +ldbc.finbench.transaction.queries.updates_dir=data/sf1/incremental +# param and update files suffix, `csv` or `parquet`, default is `csv` +ldbc.finbench.transaction.queries.files_suffix=csv +ldbc.finbench.transaction.queries.simple_read_dissipation=0.2 +ldbc.finbench.transaction.queries.update_interleave=2147483647 +ldbc.finbench.transaction.queries.scale_factor=1 +# Frequency of complex read queries +ldbc.finbench.transaction.queries.ComplexRead1_freq=1 +ldbc.finbench.transaction.queries.ComplexRead2_freq=1 +ldbc.finbench.transaction.queries.ComplexRead3_freq=1 +ldbc.finbench.transaction.queries.ComplexRead4_freq=1 +ldbc.finbench.transaction.queries.ComplexRead5_freq=1 +ldbc.finbench.transaction.queries.ComplexRead6_freq=1 +ldbc.finbench.transaction.queries.ComplexRead7_freq=1 +ldbc.finbench.transaction.queries.ComplexRead8_freq=1 +ldbc.finbench.transaction.queries.ComplexRead9_freq=1 +ldbc.finbench.transaction.queries.ComplexRead10_freq=1 +ldbc.finbench.transaction.queries.ComplexRead11_freq=1 +ldbc.finbench.transaction.queries.ComplexRead12_freq=1 +# For debugging purposes +ldbc.finbench.transaction.queries.ComplexRead1_enable=true +ldbc.finbench.transaction.queries.ComplexRead2_enable=true +ldbc.finbench.transaction.queries.ComplexRead3_enable=true +ldbc.finbench.transaction.queries.ComplexRead4_enable=true +ldbc.finbench.transaction.queries.ComplexRead5_enable=true +ldbc.finbench.transaction.queries.ComplexRead6_enable=true +ldbc.finbench.transaction.queries.ComplexRead7_enable=true +ldbc.finbench.transaction.queries.ComplexRead8_enable=true +ldbc.finbench.transaction.queries.ComplexRead9_enable=true +ldbc.finbench.transaction.queries.ComplexRead10_enable=true +ldbc.finbench.transaction.queries.ComplexRead11_enable=true +ldbc.finbench.transaction.queries.ComplexRead12_enable=true +ldbc.finbench.transaction.queries.SimpleRead1_enable=true +ldbc.finbench.transaction.queries.SimpleRead2_enable=true +ldbc.finbench.transaction.queries.SimpleRead3_enable=true +ldbc.finbench.transaction.queries.SimpleRead4_enable=true +ldbc.finbench.transaction.queries.SimpleRead5_enable=true +ldbc.finbench.transaction.queries.SimpleRead6_enable=true +ldbc.finbench.transaction.queries.Write1_enable=true +ldbc.finbench.transaction.queries.Write2_enable=true +ldbc.finbench.transaction.queries.Write3_enable=true +ldbc.finbench.transaction.queries.Write4_enable=true +ldbc.finbench.transaction.queries.Write5_enable=true +ldbc.finbench.transaction.queries.Write6_enable=true +ldbc.finbench.transaction.queries.Write7_enable=true +ldbc.finbench.transaction.queries.Write8_enable=true +ldbc.finbench.transaction.queries.Write9_enable=true +ldbc.finbench.transaction.queries.Write10_enable=true +ldbc.finbench.transaction.queries.Write11_enable=true +ldbc.finbench.transaction.queries.Write12_enable=true +ldbc.finbench.transaction.queries.Write13_enable=true +ldbc.finbench.transaction.queries.Write14_enable=true +ldbc.finbench.transaction.queries.Write15_enable=true +ldbc.finbench.transaction.queries.Write16_enable=true +ldbc.finbench.transaction.queries.Write17_enable=true +ldbc.finbench.transaction.queries.Write18_enable=true +ldbc.finbench.transaction.queries.Write19_enable=true +ldbc.finbench.transaction.queries.ReadWrite1_enable=true +ldbc.finbench.transaction.queries.ReadWrite2_enable=true +ldbc.finbench.transaction.queries.ReadWrite3_enable=true diff --git a/tigergraph/pom.xml 
b/tigergraph/pom.xml new file mode 100644 index 0000000..536820d --- /dev/null +++ b/tigergraph/pom.xml @@ -0,0 +1,56 @@ + + + 4.0.0 + + + impls + org.ldbcouncil.finbench + 0.1.0-alpha + + + tigergraph + tigergraph + 0.1.0-alpha + A tigergraph implementation to help debug queries + + + UTF-8 + 1.8 + 1.8 + + + + + com.tigergraph + tigergraph-jdbc-driver + 1.3.9 + + + com.zaxxer + HikariCP + 3.4.2 + + + + + + + org.apache.maven.plugins + maven-shade-plugin + 3.4.1 + + + + + + package + + shade + + + + + + + diff --git a/tigergraph/queries/tcr1.gsql b/tigergraph/queries/tcr1.gsql new file mode 100644 index 0000000..b3f5917 --- /dev/null +++ b/tigergraph/queries/tcr1.gsql @@ -0,0 +1,46 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tcr1( + VERTEX id, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder +) FOR GRAPH ldbc_fin syntax v1{ + TYPEDEF TUPLE RESULT; + SetAccum @result_set; + HeapAccum(accountDistance ASC, otherId ASC, mediumId ASC) @@result; + + MapAccum> @min_time_map; + + StepNodes = {id}; + + TMP = SELECT s from StepNodes:s POST-ACCUM s.@min_time_map += (0 -> 0); + + INT i = 1; + + WHILE StepNodes.size() > 0 and i <= 3 DO + StepNodes = + SELECT t + FROM StepNodes:s - (transfer:e) -> Account:t + WHERE e.timestamp > startTime and e.timestamp < endTime + and e.timestamp > s.@min_time_map.get(i-1) + ACCUM t.@min_time_map += (i -> e.timestamp); + + R = + SELECT t + FROM StepNodes:s - (signIn_REVERSE:e) -> Medium:t + WHERE e.timestamp > startTime and e.timestamp < endTime and t.isBlocked == true + ACCUM s.@result_set += RESULT(s.id, i, t.id, t.mediumType) + POST-ACCUM + FOREACH result in s.@result_set DO + @@result += result + END, + s.@result_set.clear() + ; + + i = i + 1; + + END; + + PRINT @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr10.gsql b/tigergraph/queries/tcr10.gsql new file mode 100644 index 0000000..218a53d --- /dev/null +++ b/tigergraph/queries/tcr10.gsql @@ -0,0 +1,35 @@ +USE GRAPH ldbc_fin +CREATE QUERY tcr10( + VERTEX pid1, + VERTEX pid2, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin SYNTAX V1 { + OrAccum @pid1_visited; + OrAccum @pid2_visited; + SumAccum @@sum_intersect; + SumAccum @@sum_union; + SumAccum @@jaccardSimilarity; + Nodes = {pid1, pid2}; + Nodes = + SELECT s + FROM Nodes:s -(invest:e)- :t + WHERE e.timestamp > startTime + AND e.timestamp < endTime + ACCUM IF s == pid1 THEN + t.@pid1_visited = TRUE + END, + IF s == pid2 THEN + t.@pid2_visited = TRUE + END + POST-ACCUM + IF t.@pid1_visited == TRUE AND t.@pid2_visited == TRUE THEN + @@sum_intersect += 1 + END, + @@sum_union += 1 + ; + IF @@sum_union > 0 AND @@sum_intersect > 0 THEN + @@jaccardSimilarity = round(@@sum_intersect / @@sum_union, 3); + END; + PRINT @@jaccardSimilarity AS jaccardSimilarity; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr11.gsql b/tigergraph/queries/tcr11.gsql new file mode 100644 index 0000000..6d0ef13 --- /dev/null +++ b/tigergraph/queries/tcr11.gsql @@ -0,0 +1,38 @@ +USE GRAPH ldbc_fin +CREATE QUERY tcr11( + VERTEX id, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" +) FOR GRAPH ldbc_fin SYNTAX V1 { + OrAccum @visited; + SumAccum @@sumLoanAmount; + Nodes = {id}; + Nodes = + SELECT s + FROM Nodes:s + POST-ACCUM + s.@visited = TRUE + ; + All_Nodes = {}; + WHILE Nodes.size() > 0 LIMIT 5 DO + Nodes = + SELECT t + FROM Nodes:s -(guarantee:e)- :t + WHERE t.@visited == FALSE + AND e.timestamp > startTime + AND e.timestamp < endTime + POST-ACCUM 
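+            // mark newly reached vertices as visited so later BFS rounds do not expand them again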
+ t.@visited = TRUE + ; + All_Nodes = All_Nodes UNION Nodes; + END; + All_Nodes = + SELECT t + FROM All_Nodes:s -(apply:e)- :t + POST-ACCUM + @@sumLoanAmount += t.loanAmount + ; + PRINT round(@@sumLoanAmount, 3) AS sumLoanAmount,All_Nodes.size() AS numLoans; +} diff --git a/tigergraph/queries/tcr12.gsql b/tigergraph/queries/tcr12.gsql new file mode 100644 index 0000000..8d75d12 --- /dev/null +++ b/tigergraph/queries/tcr12.gsql @@ -0,0 +1,34 @@ +USE GRAPH ldbc_fin +CREATE QUERY tcr12( + VERTEX id, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" +) FOR GRAPH ldbc_fin SYNTAX V1 { + SumAccum @sum_amount; + Nodes = {id}; + Nodes = + SELECT t + FROM Nodes:s -(own:e)- :t + ; + Nodes = + SELECT t + FROM Nodes:s -(transfer:e)- :t + WHERE e.timestamp > startTime + AND e.timestamp < endTime + ACCUM t.@sum_amount += e.amount + ; + Nodes = + SELECT s + FROM Nodes:s -(own_REVERSE:e)- Company:t + POST-ACCUM + s.@sum_amount = s.@sum_amount + ORDER BY s.@sum_amount DESC, + s.id ASC + ; + PRINT Nodes[ + Nodes.id AS compAccountId, + round(Nodes.@sum_amount, 3) AS sumEdge2Amount + ]; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr2.gsql b/tigergraph/queries/tcr2.gsql new file mode 100644 index 0000000..42d96ed --- /dev/null +++ b/tigergraph/queries/tcr2.gsql @@ -0,0 +1,59 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tcr2( + VERTEX id, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder +) FOR GRAPH ldbc_fin syntax v1{ + + TYPEDEF TUPLE RESULT; + HeapAccum(sumLoanAmount DESC, otherId ASC) @@result; + + MapAccum> @max_time_map; + + MapAccum @multi_edge_cnt; + MapAccum @sumLoanAmount; + MapAccum @sumLoanBalance; + + P = {id}; + StepNodes = + SELECT t from P:s - (own:e) -> Account:t + ACCUM t.@max_time_map += (0 -> GSQL_UINT_MAX); + + OtherAccounts (Account) = {}; + INT i = 1; + WHILE StepNodes.size() > 0 and i <= 3 DO + StepNodes = + SELECT t + FROM StepNodes:s - (transfer_REVERSE:e) -> Account:t + WHERE e.timestamp > startTime and e.timestamp < endTime + and e.timestamp < s.@max_time_map.get(i-1) + ACCUM t.@max_time_map += (i -> e.timestamp); + + OtherAccounts = OtherAccounts UNION StepNodes; + + i = i + 1; + + END; + + R = + SELECT s + FROM OtherAccounts:s - (deposit_REVERSE:e) -> Loan:t + WHERE e.timestamp > startTime and e.timestamp < endTime + ACCUM + s.@multi_edge_cnt += (t.id -> 1), + s.@sumLoanAmount += (t.id -> t.loanAmount), + s.@sumLoanBalance += (t.id -> t.balance) + POST-ACCUM + DOUBLE sumLoanAmount = 0, + DOUBLE sumLoanBalance = 0, + FOREACH (t_id, cnt) in s.@multi_edge_cnt DO + sumLoanAmount = sumLoanAmount + s.@sumLoanAmount.get(t_id)/cnt, + sumLoanBalance = sumLoanBalance + s.@sumLoanBalance.get(t_id)/cnt + END, + @@result += RESULT(s.id, round(sumLoanAmount,3), round(sumLoanBalance,3)) + ; + + PRINT @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr3.gsql b/tigergraph/queries/tcr3.gsql new file mode 100644 index 0000000..083c47a --- /dev/null +++ b/tigergraph/queries/tcr3.gsql @@ -0,0 +1,33 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tcr3( + VERTEX id1, + VERTEX id2, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin{ + INT shortestPathLength = -1; + + StepNodes = {id1}; + OrAccum @@is_found; // (default value = FALSE) + OrAccum @is_visited; // (default value = FALSE) + INT cur_len = 0; + + WHILE @@is_found == FALSE AND StepNodes.size() > 0 DO + StepNodes = + SELECT t + FROM StepNodes:s - (transfer>:e) - Account:t + WHERE e.timestamp > startTime and 
e.timestamp < endTime + and t.@is_visited == FALSE + POST-ACCUM + t.@is_visited += TRUE, + IF t == id2 THEN + @@is_found += TRUE + END + ; + cur_len = cur_len + 1; + END; + IF @@is_found THEN + shortestPathLength = cur_len; + END; + PRINT shortestPathLength; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr4.gsql b/tigergraph/queries/tcr4.gsql new file mode 100644 index 0000000..14023ae --- /dev/null +++ b/tigergraph/queries/tcr4.gsql @@ -0,0 +1,59 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tcr4( + VERTEX id1, + VERTEX id2, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin syntax v1 { + # define the result TUPLE, and HeapAccum for sort + TYPEDEF TUPLE RESULT; + HeapAccum(sumEdge2Amount DESC, sumEdge3Amount DESC, otherId ASC) @@result; + + SumAccum @numEdge2; + SumAccum @sumEdge2Amount; + MaxAccum @maxEdge2Amount; + SumAccum @numEdge3; + SumAccum @sumEdge3Amount; + MaxAccum @maxEdge3Amount; + + Src = {id1}; + Dst = {id2}; + + TMP = SELECT t from Src:s - (transfer:e) -> Account:t + WHERE e.timestamp > startTime and e.timestamp < endTime and t == id2; + + IF TMP.size() == 0 THEN + PRINT @@result; + RETURN; + END; + + MidDst = + SELECT m + FROM Dst:t - (transfer:e3) -> Account:m + WHERE e3.timestamp > startTime and e3.timestamp < endTime + ACCUM + m.@numEdge3 +=1, + m.@sumEdge3Amount += e3.amount, + m.@maxEdge3Amount += e3.amount + ; + MidSrc = + SELECT m + FROM Src:s - (transfer_REVERSE:e2) -> Account:m + WHERE e2.timestamp > startTime and e2.timestamp < endTime + ACCUM + m.@numEdge2 +=1, + m.@sumEdge2Amount += e2.amount, + m.@maxEdge2Amount += e2.amount + ; + Other = MidDst INTERSECT MidSrc; + + Other = + SELECT m FROM Other:m + POST-ACCUM + @@result += RESULT(m.id, m.@numEdge2, round(m.@sumEdge2Amount,3), round(m.@maxEdge2Amount,3), + m.@numEdge3, round(m.@sumEdge3Amount,3), round(m.@maxEdge3Amount,3)) + ; + + PRINT @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr5.gsql b/tigergraph/queries/tcr5.gsql new file mode 100644 index 0000000..0bf2dc8 --- /dev/null +++ b/tigergraph/queries/tcr5.gsql @@ -0,0 +1,61 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tcr5( + VERTEX id, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" +) FOR GRAPH ldbc_fin SYNTAX V1 { + MapAccum> @map_path_min_timestamp; + MaxAccum @max_hop; + ListAccum @@list_path; + ListAccum @@list_path_ordered; + STRING delimiter = "->"; + Nodes = {id}; + Nodes = + SELECT t + FROM Nodes:s -(own:e)- :t + POST-ACCUM + t.@map_path_min_timestamp += (to_string(t.id) -> 0) + ; + UINT path_len = 1; + All_Nodes = {}; + WHILE Nodes.size() > 0 LIMIT 3 DO + Nodes = + SELECT t + FROM Nodes:s -(transfer:e)- :t + WHERE e.timestamp > startTime + AND e.timestamp < endTime + ACCUM FOREACH (str_path, min_timestamp) IN s.@map_path_min_timestamp DO + IF e.timestamp > min_timestamp + AND circle_and_len_check(str_path, to_string(t.id), path_len, delimiter) == TRUE THEN // str_path's length is equal to path_len-1 and no circle + t.@map_path_min_timestamp += (str_path + delimiter + to_string(t.id) -> e.timestamp), + t.@max_hop += path_len + END + END + HAVING t.@max_hop == path_len + ; + All_Nodes = All_Nodes UNION Nodes; + path_len = path_len + 1; + END; + All_Nodes = + SELECT s + FROM All_Nodes:s + POST-ACCUM + FOREACH (str_path, min_timestamp) IN s.@map_path_min_timestamp DO + IF min_timestamp > 0 THEN + @@list_path += str_path + END + END + ; + path_len = 3; + WHILE path_len >= 1 DO + FOREACH str_path IN @@list_path DO + IF 
get_path_len(str_path, delimiter) == path_len THEN + @@list_path_ordered += str_path; + END; + END; + path_len = path_len - 1; + END; + PRINT @@list_path_ordered AS path; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr6.gsql b/tigergraph/queries/tcr6.gsql new file mode 100644 index 0000000..d66cbf4 --- /dev/null +++ b/tigergraph/queries/tcr6.gsql @@ -0,0 +1,34 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tcr6( + UINT accountId, + DOUBLE threshold1, + DOUBLE threshold2, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" + ) syntax v1{ + TYPEDEF TUPLE RESULT; + HeapAccum(sumEdge2Amount DESC,midId ASC) @@result; + SumAccum @sumAmountE1; + SumAccum @sumAmountE2; + SumAccum @transferInE1Count; + + Seed = SELECT src FROM Account:src WHERE src.id == accountId and src.accountType like "%card%"; + + S = SELECT mid FROM Seed:dst -(withdraw_REVERSE:e2)->Account:mid + WHERE e2.timestamp > startTime AND e2.timestamp < endTime AND e2.amount > threshold2 + ACCUM mid.@sumAmountE2 += e2.amount + ; + + S = SELECT mid FROM S:mid - (transfer_REVERSE:e1)->Account:src + WHERE e1.timestamp > startTime AND e1.timestamp < endTime AND e1.amount > threshold1 + ACCUM mid.@sumAmountE1 += e1.amount, + mid.@transferInE1Count += 1 + POST-ACCUM + IF mid.@transferInE1Count > 3 THEN + @@result += RESULT(mid.id, round(mid.@sumAmountE1, 3), round(mid.@sumAmountE2, 3)) + END + ; + print @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr7.gsql b/tigergraph/queries/tcr7.gsql new file mode 100644 index 0000000..27d3da1 --- /dev/null +++ b/tigergraph/queries/tcr7.gsql @@ -0,0 +1,33 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tcr7( + VERTEX accountId, + DOUBLE threshold, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" + ) syntax v1{ + SumAccum @@sumAmountE1, @@sumAmountE2; + + Seed = {accountId}; + + transfersIn = SELECT s FROM Seed:t - (transfer_REVERSE:e1) -> Account:s + WHERE e1.amount > threshold + AND e1.timestamp > startTime + AND e1.timestamp < endTime + ACCUM @@sumAmountE1 += e1.amount + ; + + transfersOut = SELECT t FROM Seed:s - (transfer:e2) -> Account:t + WHERE e2.amount > threshold + AND e2.timestamp > startTime + AND e2.timestamp < endTime + ACCUM @@sumAmountE2 += e2.amount + ; + + IF transfersOut.size() == 0 THEN + PRINT transfersIn.size() AS numSrc, transfersOut.size() AS numDst, -1 AS inOutRatio; + ELSE + PRINT transfersIn.size() AS numSrc, transfersOut.size() AS numDst, round(@@sumAmountE1 * 1.0/@@sumAmountE2, 3) AS inOutRatio; + END; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr8.gsql b/tigergraph/queries/tcr8.gsql new file mode 100644 index 0000000..5458fa4 --- /dev/null +++ b/tigergraph/queries/tcr8.gsql @@ -0,0 +1,57 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tcr8( + VERTEX loanId, + FLOAT threshold, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" + ) syntax v1{ + TYPEDEF TUPLE RESULT; + HeapAccum(distanceFromLoan DESC, final_ratio DESC, dstId ASC) @@result; + MaxAccum @upstreamAmount; + SumAccum @totalFlowInAmount; + MinAccum @minDist; + OrAccum @processed; + LoanV = {loanId}; + DOUBLE loanAmt=loanId.loanAmount; + INT maxDistLimit = 4; + + Nodes = SELECT t FROM LoanV:s -(deposit:e1) -> Account:t + WHERE e1.timestamp > startTime AND e1.timestamp < endTime + ACCUM t.@upstreamAmount += e1.amount, + t.@minDist = maxDistLimit + 1 + ; + + All_Nodes = Nodes; + INT 
distance = 1; + WHILE Nodes.size() > 0 LIMIT 3 DO + distance = distance + 1; + Nodes = SELECT t + FROM Nodes:s -((transfer | withdraw):e) -> :t + WHERE e.timestamp > startTime AND e.timestamp < endTime + AND e.amount > s.@upstreamAmount * threshold // threshold must be positive + ACCUM t.@upstreamAmount += e.amount + POST-ACCUM + IF t.@processed == FALSE THEN + t.@minDist += distance + END, + t.@processed += TRUE + ; + All_Nodes = All_Nodes UNION Nodes; + END; + + P = SELECT s FROM All_Nodes:s -((transfer | withdraw):e) -> Account:t + WHERE t.@processed != FALSE + AND s.@minDist != maxDistLimit + AND e.timestamp > startTime AND e.timestamp < endTime + ACCUM t.@totalFlowInAmount += e.amount + ; + + R = SELECT s FROM Account:s + WHERE s.@processed != FALSE + ACCUM @@result += RESULT(s.id, s.@minDist, round(s.@totalFlowInAmount/loanAmt, 3)) + ; + + print @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tcr9.gsql b/tigergraph/queries/tcr9.gsql new file mode 100644 index 0000000..a97d228 --- /dev/null +++ b/tigergraph/queries/tcr9.gsql @@ -0,0 +1,48 @@ +USE GRAPH ldbc_fin +CREATE QUERY tcr9( + VERTEX id, + DOUBLE threshold, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" +) FOR GRAPH ldbc_fin SYNTAX V1 { + SumAccum @@sum_deposit; + SumAccum @@sum_repay; + SumAccum @@sum_transfers_in; + SumAccum @@sum_transfers_out; + SumAccum @@ratioRepay; + SumAccum @@ratioDeposit; + SumAccum @@ratioTransfer; + Nodes = {id}; + + Nodes = + SELECT s + FROM Nodes:s -((deposit_REVERSE|repay|transfer_REVERSE|transfer):e)- :t + WHERE e.amount > threshold + AND e.timestamp > startTime + AND e.timestamp < endTime + ACCUM IF e.type == "deposit_REVERSE" THEN + @@sum_deposit += e.amount + ELSE IF e.type == "repay" THEN + @@sum_repay += e.amount + ELSE IF e.type == "transfer_REVERSE" THEN + @@sum_transfers_in += e.amount + ELSE IF e.type == "transfer" THEN + @@sum_transfers_out += e.amount + END + ; + IF @@sum_repay > 0 THEN + @@ratioRepay = round(@@sum_deposit / @@sum_repay, 3); + ELSE + @@ratioRepay = -1; + END; + IF @@sum_transfers_out > 0 THEN + @@ratioDeposit = round(@@sum_deposit / @@sum_transfers_out, 3); + @@ratioTransfer = round(@@sum_transfers_in / @@sum_transfers_out, 3); + ELSE + @@ratioDeposit = -1; + @@ratioTransfer = -1; + END; + PRINT @@ratioRepay AS ratioRepay,@@ratioDeposit AS ratioDeposit,@@ratioTransfer AS ratioTransfer; +} \ No newline at end of file diff --git a/tigergraph/queries/trw1.gsql b/tigergraph/queries/trw1.gsql new file mode 100644 index 0000000..4b34257 --- /dev/null +++ b/tigergraph/queries/trw1.gsql @@ -0,0 +1,42 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY trw1( + UINT srcId, + UINT dstId, + UINT time, + DOUBLE amount, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin syntax v1{ + + # check is_blocked + Nodes = SELECT s FROM Account:s WHERE s.id IN (srcId,dstId) AND s.isBlocked == TRUE; + IF Nodes.size() > 0 THEN + PRINT "Have Account blocked, abort"; + RETURN; + END; + + # check whether other Account exist + Src = SELECT s FROM Account:s WHERE s.id == srcId; + Dst = SELECT t FROM Account:t WHERE t.id == dstId; + + MidDst = + SELECT m + FROM Dst:t - (transfer:e3) -> Account:m + WHERE e3.timestamp > startTime and e3.timestamp < endTime + ; + MidSrc = + SELECT m + FROM Src:s - (transfer_REVERSE:e2) -> Account:m + WHERE e2.timestamp > startTime and e2.timestamp < endTime + ; + Other = MidDst INTERSECT MidSrc; + + IF Other.size() == 0 THEN // not exist, insert + PRINT "No cycle, will commit"; + INSERT 
INTO transfer (FROM, TO, timestamp, amount) VALUES (srcId, dstId, time, amount); + ELSE // exists, update + PRINT "Have cycle, will mark blocked"; + UPDATE s FROM Src:s SET s.isBlocked = true; + UPDATE t FROM Dst:t SET t.isBlocked = true; + END; +} \ No newline at end of file diff --git a/tigergraph/queries/trw2.gsql b/tigergraph/queries/trw2.gsql new file mode 100644 index 0000000..3e30e32 --- /dev/null +++ b/tigergraph/queries/trw2.gsql @@ -0,0 +1,43 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY trw2( + UINT srcId, + UINT dstId, + UINT time, + UINT startTime, + UINT endTime, + DOUBLE amount, + DOUBLE amountThreshold, + DOUBLE ratioThreshold, + INT truncationLimit, + STRING truncationOrder +) FOR GRAPH ldbc_fin syntax v1{ + SumAccum @totalE1Amount, @totalE2Amount, @ratio; + SumAccum @outCount; + + Nodes = SELECT s FROM Account:s WHERE s.id IN (srcId,dstId) AND s.isBlocked == TRUE; + IF Nodes.size() > 0 THEN + RETURN; + END; + + INSERT INTO transfer VALUES (srcId, dstId, time, _, amount, _, _, _); + + transfersIn = SELECT t FROM Nodes:t - (transfer_REVERSE:e1) -> Account:s + WHERE e1.amount > amountThreshold + AND e1.timestamp > startTime + AND e1.timestamp < endTime + ACCUM t.@totalE1Amount += e1.amount + ; + + transfersOut = SELECT s FROM Nodes:s - (transfer:e2) -> Account:t + WHERE e2.amount > amountThreshold + AND e2.timestamp > startTime + AND e2.timestamp < endTime + ACCUM s.@totalE2Amount += e2.amount, + s.@outCount += 1 + ; + + Nodes = SELECT s FROM Nodes:s + POST-ACCUM + s.isBlocked = (s.@outCount > 0 AND s.@totalE1Amount/s.@totalE2Amount <= ratioThreshold) OR (s.@outCount == 0 AND ratioThreshold >= -1) + ; +} \ No newline at end of file diff --git a/tigergraph/queries/trw3.gsql b/tigergraph/queries/trw3.gsql new file mode 100644 index 0000000..8a94029 --- /dev/null +++ b/tigergraph/queries/trw3.gsql @@ -0,0 +1,73 @@ +USE GRAPH ldbc_fin +CREATE QUERY trw3( + UINT srcId, + UINT dstId, + UINT time, + DOUBLE threshold, + UINT startTime, + UINT endTime, + INT truncationLimit, + STRING truncationOrder = "TIMESTAMP_DESCENDING" +) FOR GRAPH ldbc_fin SYNTAX V1 { + Nodes = SELECT s FROM Person:s WHERE s.id IN (srcId,dstId) AND s.isBlocked == TRUE; + IF Nodes.size() > 0 THEN + PRINT "One of the persons is blocked!"; + RETURN; + END; + + OrAccum @visited; + SumAccum @@sumLoanAmount; + Nodes = SELECT s FROM Person:s WHERE s.id == srcId; + All_Nodes = Nodes; + Nodes = + SELECT s + FROM Nodes:s + POST-ACCUM + s.@visited = TRUE + ; + Nodes = + SELECT t + FROM Nodes:s -(guarantee:e)- :t + WHERE t.@visited == FALSE + AND e.timestamp > startTime + AND e.timestamp < endTime + POST-ACCUM + t.@visited = TRUE + ; + All_Nodes = All_Nodes UNION Nodes; + IF time > startTime AND time < endTime THEN + Dst_Nodes = SELECT t FROM Person:t WHERE t.id == dstId; + Dst_Nodes = + SELECT s + FROM Dst_Nodes:s + POST-ACCUM + s.@visited = TRUE + ; + Nodes = Nodes UNION Dst_Nodes; + All_Nodes = All_Nodes UNION Dst_Nodes; + END; + WHILE Nodes.size() > 0 DO + Nodes = + SELECT t + FROM Nodes:s -(guarantee:e)- :t + WHERE t.@visited == FALSE + AND e.timestamp > startTime + AND e.timestamp < endTime + POST-ACCUM + t.@visited = TRUE + ; + All_Nodes = All_Nodes UNION Nodes; + END; + All_Nodes = + SELECT t + FROM All_Nodes:s -(apply:e)- :t + POST-ACCUM + @@sumLoanAmount += t.loanAmount + ; + + IF @@sumLoanAmount > threshold THEN + UPDATE s FROM Person:s SET s.isBlocked = true WHERE s.id IN (srcId,dstId); + ELSE + INSERT INTO guarantee VALUES (srcId Person, dstId Person, time, _); + END; +} \ No newline at end of file diff 
--git a/tigergraph/queries/tsr1.gsql b/tigergraph/queries/tsr1.gsql new file mode 100644 index 0000000..44a96d1 --- /dev/null +++ b/tigergraph/queries/tsr1.gsql @@ -0,0 +1,11 @@ +USE GRAPH ldbc_fin +CREATE QUERY tsr1( + VERTEX id +) FOR GRAPH ldbc_fin SYNTAX V1 { + Nodes = {id}; + PRINT Nodes[ + Nodes.createTime AS createTime, + Nodes.isBlocked AS isBlocked, + Nodes.accountType AS accountType + ]; +} \ No newline at end of file diff --git a/tigergraph/queries/tsr2.gsql b/tigergraph/queries/tsr2.gsql new file mode 100644 index 0000000..131b33d --- /dev/null +++ b/tigergraph/queries/tsr2.gsql @@ -0,0 +1,37 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tsr2( + VERTEX id, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin syntax v1{ + SumAccum @@sumEdge1Amount; + MaxAccum @@maxEdge1Amount = -1; + SumAccum @@numEdge1; + SumAccum @@sumEdge2Amount; + MaxAccum @@maxEdge2Amount = -1; + SumAccum @@numEdge2; + + Start = {id}; + + R1 = + SELECT t + FROM Start:s - (transfer:e) -> Account:t + WHERE e.timestamp > startTime and e.timestamp < endTime + ACCUM + @@sumEdge1Amount += e.amount, + @@maxEdge1Amount += e.amount, + @@numEdge1 += 1 + ; + R2 = + SELECT t + FROM Start:s - (transfer_REVERSE:e) -> Account:t + WHERE e.timestamp > startTime and e.timestamp < endTime + ACCUM + @@sumEdge2Amount += e.amount, + @@maxEdge2Amount += e.amount, + @@numEdge2 += 1 + ; + + PRINT round(@@sumEdge1Amount, 3) as sumEdge1Amount, round(@@maxEdge1Amount,3) as maxEdge1Amount, @@numEdge1 as numEdge1, + round(@@sumEdge2Amount, 3) as sumEdge2Amount, round(@@maxEdge2Amount,3) as maxEdge2Amount, @@numEdge2 as numEdge2; +} \ No newline at end of file diff --git a/tigergraph/queries/tsr3.gsql b/tigergraph/queries/tsr3.gsql new file mode 100644 index 0000000..1bffc5f --- /dev/null +++ b/tigergraph/queries/tsr3.gsql @@ -0,0 +1,18 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tsr3(VERTEX dstId, DOUBLE threshold, UINT startTime, UINT endTime) syntax v1{ + SumAccum @@countEdge1; + int countEdge2 = dstId.outdegree("transfer_REVERSE"); + + DstAccount = {dstId}; + IF countEdge2 == 0 THEN + PRINT -1 AS blockRatio; + ELSE + Result = SELECT dst FROM DstAccount:dst - (transfer_REVERSE:e)-> Account:s + WHERE e.timestamp > startTime AND e.timestamp < endTime + AND e.amount > threshold + AND s.isBlocked + ACCUM @@countEdge1 += 1 + ; + PRINT round(@@countEdge1 * 1.0/countEdge2, 3) AS blockRatio; + END; +} \ No newline at end of file diff --git a/tigergraph/queries/tsr4.gsql b/tigergraph/queries/tsr4.gsql new file mode 100644 index 0000000..969bfeb --- /dev/null +++ b/tigergraph/queries/tsr4.gsql @@ -0,0 +1,18 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE DISTRIBUTED QUERY tsr4(UINT dstAccountId, DOUBLE threshold, UINT startTime, UINT endTime) syntax v1{ + TYPEDEF TUPLE RESULT; + HeapAccum(sumAmount DESC, dstId ASC) @@result; + SumAccum @eCount; + SumAccum @totalAmount; + + R = SELECT src FROM Account:src -(transfer:e) -> :dst + WHERE src.id == dstAccountId + AND e.timestamp > startTime AND e.timestamp < endTime + AND e.amount > threshold + ACCUM dst.@eCount += 1, + dst.@totalAmount += e.amount + POST-ACCUM @@result += RESULT(dst.id, dst.@eCount, round(dst.@totalAmount, 3)) + ; + + PRINT @@result; +} \ No newline at end of file diff --git a/tigergraph/queries/tsr5.gsql b/tigergraph/queries/tsr5.gsql new file mode 100644 index 0000000..89daa9c --- /dev/null +++ b/tigergraph/queries/tsr5.gsql @@ -0,0 +1,29 @@ +USE GRAPH ldbc_fin +CREATE QUERY tsr5( + VERTEX id, + DOUBLE threshold, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin SYNTAX V1 
{ + SumAccum @sum_count; + SumAccum @sum_amount; + Nodes = {id}; + Nodes = + SELECT t + FROM Nodes:s -(transfer_REVERSE:e)- :t + WHERE e.amount > threshold + AND e.timestamp > startTime + AND e.timestamp < endTime + ACCUM t.@sum_count += 1, + t.@sum_amount += e.amount + POST-ACCUM + t.@sum_amount = round(t.@sum_amount, 3) + ORDER BY t.@sum_amount DESC, + t.id ASC + ; + PRINT Nodes[ + Nodes.id AS srcId, + Nodes.@sum_count AS numEdges, + Nodes.@sum_amount AS sumAmount + ]; +} \ No newline at end of file diff --git a/tigergraph/queries/tsr6.gsql b/tigergraph/queries/tsr6.gsql new file mode 100644 index 0000000..282685b --- /dev/null +++ b/tigergraph/queries/tsr6.gsql @@ -0,0 +1,26 @@ +USE GRAPH ldbc_fin +CREATE QUERY tsr6( + VERTEX id, + UINT startTime, + UINT endTime +) FOR GRAPH ldbc_fin SYNTAX V1 { + Nodes = {id}; + Nodes = + SELECT t + FROM Nodes:s -(transfer_REVERSE:e)- :t + WHERE e.timestamp > startTime + AND e.timestamp < endTime + ; + Nodes = + SELECT t + FROM Nodes:s -(transfer:e)- :t + WHERE t != id + AND e.timestamp > startTime + AND e.timestamp < endTime + AND t.isBlocked == TRUE + ORDER BY t.id ASC + ; + PRINT Nodes[ + Nodes.id AS dstId + ]; +} \ No newline at end of file diff --git a/tigergraph/queries/tw1.gsql b/tigergraph/queries/tw1.gsql new file mode 100644 index 0000000..1756429 --- /dev/null +++ b/tigergraph/queries/tw1.gsql @@ -0,0 +1,8 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw1( + UINT personId, + STRING personName, + BOOL isBlocked +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Person(PRIMARY_ID, name, isBlocked) VALUES (personId, personName, isBlocked); +} \ No newline at end of file diff --git a/tigergraph/queries/tw10.gsql b/tigergraph/queries/tw10.gsql new file mode 100644 index 0000000..765a1fc --- /dev/null +++ b/tigergraph/queries/tw10.gsql @@ -0,0 +1,8 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw10( + VERTEX personId1, + VERTEX personId2, + UINT time +) FOR GRAPH ldbc_fin SYNTAX V1 { + INSERT INTO guarantee VALUES (personId1, personId2, time, _); +} \ No newline at end of file diff --git a/tigergraph/queries/tw11.gsql b/tigergraph/queries/tw11.gsql new file mode 100644 index 0000000..ba651a7 --- /dev/null +++ b/tigergraph/queries/tw11.gsql @@ -0,0 +1,7 @@ +/** +Add a guarantee edge from a Company to another Company. +*/ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw11(UINT companyId1, UINT companyId2, UINT time){ + INSERT INTO guarantee (FROM, TO, timestamp) VALUES (companyId1 Company, companyId2 Company, time); +} \ No newline at end of file diff --git a/tigergraph/queries/tw12.gsql b/tigergraph/queries/tw12.gsql new file mode 100644 index 0000000..d057dbd --- /dev/null +++ b/tigergraph/queries/tw12.gsql @@ -0,0 +1,7 @@ +/** +Add a transfer edge from an Account to another Account. 
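+The timestamp acts as the DISCRIMINATOR of the transfer edge, so repeated transfers between the same two accounts are stored as separate edges; the "_" placeholders leave orderNumber, comment, payType and goodsType at their default values.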
+*/ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw12(UINT accountId1, UINT accountId2, UINT time, DOUBLE amount){ + INSERT INTO transfer VALUES (accountId1, accountId2, time, _, amount, _, _, _); +} \ No newline at end of file diff --git a/tigergraph/queries/tw13.gsql b/tigergraph/queries/tw13.gsql new file mode 100644 index 0000000..0d064c3 --- /dev/null +++ b/tigergraph/queries/tw13.gsql @@ -0,0 +1,9 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw13( + UINT accountId1, + UINT accountId2, + UINT time, + DOUBLE amount +) FOR GRAPH ldbc_fin SYNTAX V1 { + INSERT INTO withdraw VALUES (accountId1, accountId2, time, amount); +} \ No newline at end of file diff --git a/tigergraph/queries/tw14.gsql b/tigergraph/queries/tw14.gsql new file mode 100644 index 0000000..017324f --- /dev/null +++ b/tigergraph/queries/tw14.gsql @@ -0,0 +1,9 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw14( + UINT accountId, + UINT loanId, + UINT time, + DOUBLE amount +) FOR GRAPH ldbc_fin SYNTAX V1 { + INSERT INTO repay VALUES (accountId, loanId, time, amount); +} \ No newline at end of file diff --git a/tigergraph/queries/tw15.gsql b/tigergraph/queries/tw15.gsql new file mode 100644 index 0000000..1d4c5d2 --- /dev/null +++ b/tigergraph/queries/tw15.gsql @@ -0,0 +1,9 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw15( + UINT loanId, + UINT accountId, + UINT time, + DOUBLE amount +) FOR GRAPH ldbc_fin SYNTAX V1 { + INSERT INTO deposit VALUES (loanId, accountId, time, amount); +} \ No newline at end of file diff --git a/tigergraph/queries/tw16.gsql b/tigergraph/queries/tw16.gsql new file mode 100644 index 0000000..4b51941 --- /dev/null +++ b/tigergraph/queries/tw16.gsql @@ -0,0 +1,8 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw16( + UINT mediumId, + UINT accountId, + UINT time +) FOR GRAPH ldbc_fin SYNTAX V1 { + INSERT INTO signIn VALUES (mediumId, accountId, time, _); +} \ No newline at end of file diff --git a/tigergraph/queries/tw17.gsql b/tigergraph/queries/tw17.gsql new file mode 100644 index 0000000..0e27172 --- /dev/null +++ b/tigergraph/queries/tw17.gsql @@ -0,0 +1,19 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw17( + VERTEX accountId +) FOR GRAPH ldbc_fin SYNTAX V1 { + Nodes = {accountId}; + Tmp = + SELECT s + FROM Nodes:s -((repay|deposit_REVERSE):e)- :t + ACCUM DELETE (e) + POST-ACCUM (t) + DELETE (t) + ; + DELETE e + FROM Nodes:s -(:e)- :t + ; + DELETE s + FROM Nodes:s + ; +} \ No newline at end of file diff --git a/tigergraph/queries/tw18.gsql b/tigergraph/queries/tw18.gsql new file mode 100644 index 0000000..7ece341 --- /dev/null +++ b/tigergraph/queries/tw18.gsql @@ -0,0 +1,6 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw18( + VERTEX accountId +) FOR GRAPH ldbc_fin SYNTAX V1 { + accountId.isBlocked = TRUE; +} \ No newline at end of file diff --git a/tigergraph/queries/tw19.gsql b/tigergraph/queries/tw19.gsql new file mode 100644 index 0000000..9cb6aea --- /dev/null +++ b/tigergraph/queries/tw19.gsql @@ -0,0 +1,6 @@ +USE GRAPH ldbc_fin +CREATE QUERY tw19( + VERTEX personId +) FOR GRAPH ldbc_fin SYNTAX V1 { + personId.isBlocked = TRUE; +} \ No newline at end of file diff --git a/tigergraph/queries/tw2.gsql b/tigergraph/queries/tw2.gsql new file mode 100644 index 0000000..723ef47 --- /dev/null +++ b/tigergraph/queries/tw2.gsql @@ -0,0 +1,8 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw2( + UINT companyId, + STRING companyName, + BOOL isBlocked +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Company(PRIMARY_ID, name, isBlocked) VALUES (companyId, companyName, isBlocked); +} \ No newline at end of file diff --git 
a/tigergraph/queries/tw3.gsql b/tigergraph/queries/tw3.gsql new file mode 100644 index 0000000..d5dbedc --- /dev/null +++ b/tigergraph/queries/tw3.gsql @@ -0,0 +1,8 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw3( + UINT mediumId, + STRING mediumType, + BOOL isBlocked +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Medium(PRIMARY_ID, mediumType, isBlocked) VALUES (mediumId, mediumType, isBlocked); +} \ No newline at end of file diff --git a/tigergraph/queries/tw4.gsql b/tigergraph/queries/tw4.gsql new file mode 100644 index 0000000..aa9bc33 --- /dev/null +++ b/tigergraph/queries/tw4.gsql @@ -0,0 +1,11 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw4( + VERTEX personId, + UINT accountId, + UINT time, + BOOL accountBlocked, + STRING accountType +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Account(PRIMARY_ID, createTime, isBlocked, accountType) VALUES (accountId, time, accountBlocked, accountType); + INSERT INTO EDGE own VALUES (personId, accountId, time); +} \ No newline at end of file diff --git a/tigergraph/queries/tw5.gsql b/tigergraph/queries/tw5.gsql new file mode 100644 index 0000000..b63b2e0 --- /dev/null +++ b/tigergraph/queries/tw5.gsql @@ -0,0 +1,11 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw5( + VERTEX companyId, + UINT accountId, + UINT time, + BOOL accountBlocked, + STRING accountType +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Account(PRIMARY_ID, createTime, isBlocked, accountType) VALUES (accountId, time, accountBlocked, accountType); + INSERT INTO EDGE own VALUES (companyId, accountId, time); +} \ No newline at end of file diff --git a/tigergraph/queries/tw6.gsql b/tigergraph/queries/tw6.gsql new file mode 100644 index 0000000..f341785 --- /dev/null +++ b/tigergraph/queries/tw6.gsql @@ -0,0 +1,11 @@ +USE GRAPH ldbc_fin +CREATE or REPLACE QUERY tw6( + VERTEX personId, + UINT loanId, + DOUBLE loanAmount, + DOUBLE balance, + UINT time +) FOR GRAPH ldbc_fin syntax v1{ + INSERT INTO Loan (PRIMARY_ID, loanAmount, balance) VALUES (loanId, loanAmount, balance); + INSERT INTO EDGE apply VALUES (personId, loanId, time, _); +} \ No newline at end of file diff --git a/tigergraph/queries/tw7.gsql b/tigergraph/queries/tw7.gsql new file mode 100644 index 0000000..b774018 --- /dev/null +++ b/tigergraph/queries/tw7.gsql @@ -0,0 +1,5 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw7(UINT companyId, UINT loanId, DOUBLE loanAmount, DOUBLE balance, UINT time){ + INSERT INTO Loan VALUES (loanId, loanAmount, balance, _, _); + INSERT INTO apply VALUES (companyId Company, loanId Loan, time, _); +} \ No newline at end of file diff --git a/tigergraph/queries/tw8.gsql b/tigergraph/queries/tw8.gsql new file mode 100644 index 0000000..61fddff --- /dev/null +++ b/tigergraph/queries/tw8.gsql @@ -0,0 +1,4 @@ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw8(UINT personId, UINT companyId, UINT time, DOUBLE ratio){ + INSERT INTO invest VALUES (personId Person, companyId, time, ratio); +} \ No newline at end of file diff --git a/tigergraph/queries/tw9.gsql b/tigergraph/queries/tw9.gsql new file mode 100644 index 0000000..844b0a1 --- /dev/null +++ b/tigergraph/queries/tw9.gsql @@ -0,0 +1,7 @@ +/** +Add invest between Company and Company +*/ +USE GRAPH ldbc_fin +CREATE OR REPLACE QUERY tw9(UINT companyId1, UINT companyId2, UINT time, DOUBLE ratio){ + INSERT INTO invest VALUES (companyId1 Company, companyId2, time, ratio); +} \ No newline at end of file diff --git a/tigergraph/run.sh b/tigergraph/run.sh new file mode 100644 index 0000000..cb4c4ff --- /dev/null +++ b/tigergraph/run.sh @@ -0,0 
+1,2 @@ +java -cp target/tigergraph-0.1.0-alpha.jar org.ldbcouncil.finbench.driver.driver.Driver -P $1 + diff --git a/tigergraph/scripts/ExprFunctions.hpp b/tigergraph/scripts/ExprFunctions.hpp new file mode 100644 index 0000000..1723dbd --- /dev/null +++ b/tigergraph/scripts/ExprFunctions.hpp @@ -0,0 +1,143 @@ +/****************************************************************************** + * Copyright (c) 2015-2016, TigerGraph Inc. + * All rights reserved. + * Project: TigerGraph Query Language + * udf.hpp: a library of user defined functions used in queries. + * + * - This library should only define functions that will be used in + * TigerGraph Query scripts. Other logics, such as structs and helper + * functions that will not be directly called in the GQuery scripts, + * must be put into "ExprUtil.hpp" under the same directory where + * this file is located. + * + * - Supported type of return value and parameters + * - int + * - float + * - double + * - bool + * - string (don't use std::string) + * - accumulators + * + * - Function names are case sensitive, unique, and can't be conflict with + * built-in math functions and reserve keywords. + * + * - Please don't remove necessary codes in this file + * + * - A backup of this file can be retrieved at + * /dev_/gdk/gsql/src/QueryUdf/ExprFunctions.hpp + * after upgrading the system. + * + ******************************************************************************/ + +#ifndef EXPRFUNCTIONS_HPP_ +#define EXPRFUNCTIONS_HPP_ + +#include +#include +#include +#include +#include +#include + +/** XXX Warning!! Put self-defined struct in ExprUtil.hpp ** + * No user defined struct, helper functions (that will not be directly called + * in the GQuery scripts) etc. are allowed in this file. This file only + * contains user-defined expression function's signature and body. + * Please put user defined structs, helper functions etc. 
in ExprUtil.hpp + */ +#include "ExprUtil.hpp" + +namespace UDIMPL { + typedef std::string string; //XXX DON'T REMOVE + + /****** BIULT-IN FUNCTIONS **************/ + /****** XXX DON'T REMOVE ****************/ + inline int64_t str_to_int (string str) { + return atoll(str.c_str()); + } + + inline int64_t float_to_int (float val) { + return (int64_t) val; + } + + using namespace std; + + inline uint64_t convert_time_to_uint(string str_time) { + // Convert to epoch second + tm tm_struct; + istringstream ss(str_time); + ss >> get_time(&tm_struct, "%Y-%m-%dT%H:%M:%S"); + time_t timestamp = mktime(&tm_struct); + uint64_t epoch = (uint64_t) timestamp; + + // Adjust for time zone + if (str_time.length() >= 28) { + string str_hour = str_time.substr(24, 2); + string str_minute = str_time.substr(26, 2); + uint64_t diff_second = stoi(str_hour) * 3600 + stoi(str_minute) * 60; + if (str_time.substr(23, 1) == "-") { + epoch = epoch + diff_second; + } else { + epoch = epoch - diff_second; + } + } + + // Add millisecond + uint64_t ms = 0; + if (str_time.length() >= 23) { + string str_ms = str_time.substr(20, 3); + ms = stoi(str_ms); + } + return epoch * 1000 + ms; + } + + inline bool circle_and_len_check(string str_path, string key, uint path_len, string delimiter) { + size_t pos_start = 0, pos_end, delim_len = delimiter.length(); + string token; + vector vector_str; + + while ((pos_end = str_path.find (delimiter, pos_start)) != string::npos) { + token = str_path.substr (pos_start, pos_end - pos_start); + if(token != ""){ + vector_str.push_back(token); + } + pos_start = pos_end + delim_len; + } + + token = str_path.substr (pos_start); + if(token != "") { + vector_str.push_back(token); + } + + if (vector_str.size() == path_len && find(vector_str.begin(), vector_str.end(), key) == vector_str.end()) { + return true; + } + return false; + } + + inline uint64_t get_path_len(string str_path, string delimiter) { + size_t pos_start = 0, pos_end, delim_len = delimiter.length(); + string token; + vector vector_str; + + while ((pos_end = str_path.find (delimiter, pos_start)) != string::npos) { + token = str_path.substr (pos_start, pos_end - pos_start); + if(token != ""){ + vector_str.push_back(token); + } + pos_start = pos_end + delim_len; + } + + token = str_path.substr (pos_start); + if(token != "") { + vector_str.push_back(token); + } + if (vector_str.size() >= 1) { + return vector_str.size() - 1; + } + return 0; + } +} +/****************************************/ + +#endif /* EXPRFUNCTIONS_HPP_ */ diff --git a/tigergraph/scripts/TokenBank.cpp b/tigergraph/scripts/TokenBank.cpp new file mode 100644 index 0000000..3a4457b --- /dev/null +++ b/tigergraph/scripts/TokenBank.cpp @@ -0,0 +1,64 @@ +#include +#include +#include +#include +uint32_t nextInt(char*& timestamp_ptr, const char* timestamp_end_ptr){ + uint32_t int_value = 0; + while (*timestamp_ptr >= '0' && *timestamp_ptr <= '9' + && timestamp_ptr < timestamp_end_ptr) { + int_value = int_value * 10 + *timestamp_ptr - '0'; + timestamp_ptr++; + } + timestamp_ptr++; // jump over separator + return int_value; +} + +extern "C" uint64_t ToMiliSeconds(const char* const iToken[], uint32_t iTokenLen[], uint32_t iTokenNum) { + const int mon_days[] + = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}; + int64_t tyears, tdays, leaps1, leaps2, leaps3, sign, tm_msec; + int64_t tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec; + char* timestamp_ptr_ = const_cast(iToken[0]); + char* timestamp_end_ptr_ = timestamp_ptr_ + iTokenLen[0]; + tm_year = 
nextInt(timestamp_ptr_, timestamp_end_ptr_); + tm_mon = nextInt(timestamp_ptr_, timestamp_end_ptr_); + tm_mday = nextInt(timestamp_ptr_, timestamp_end_ptr_); + tm_hour = nextInt(timestamp_ptr_, timestamp_end_ptr_); + tm_min = nextInt(timestamp_ptr_, timestamp_end_ptr_); + tm_sec = nextInt(timestamp_ptr_, timestamp_end_ptr_); + // adapt for time format like "2021-06-13 10:38:59" "2021-10-04 01:15:13.6" "2022-03-06 03:47:51.72" + if (iTokenLen[0] == 19) { + tm_msec = 0; + } else { + tm_msec = nextInt(timestamp_ptr_, timestamp_end_ptr_); + if (iTokenLen[0] == 21) { + tm_msec *= 100; + } else if (iTokenLen[0] == 22) { + tm_msec *= 10; + } + } + + tyears = tm_year - 1970; // base time is 1970 + sign = (tyears >= 0) ? 1 : -1; + leaps1 = (tyears + sign * 2) / 4; // no of next two lines until year 2100. + leaps2 = (tyears - 30) + / 100; // every 100 hundred years, this is an additional common year + leaps3 = (tyears - 30) + / 400; // every 400 hundred years, this is an additional leap year + tdays = mon_days[tm_mon - 1]; + tdays += tm_mday - 1; // days of month passed. + tdays = tdays + (tyears * 365) + leaps1 - leaps2 + leaps3; + if (tm_mon <= 2 && ((tyears + 2) % 4 == 0)) { + // leaf year: the 1st two months need -1 day. + if (!((tyears - 30) % 100 == 0 && (tyears - 30) % 400 != 0)) { + tdays--; + } + } + // when it is a leap year before 1970/01/01, leaps1 is one more than it should + // be. + if (sign < 0 && (tyears + 2) % 4 == 0 + && !((tyears - 30) % 100 == 0 && (tyears - 30) % 400 != 0)) { + tdays++; + } + return ((tdays * 86400) + (tm_hour * 3600) + (tm_min * 60) + tm_sec) * 1000 + tm_msec; +} \ No newline at end of file diff --git a/tigergraph/scripts/create_queries.sh b/tigergraph/scripts/create_queries.sh new file mode 100644 index 0000000..0635d01 --- /dev/null +++ b/tigergraph/scripts/create_queries.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +cwd=$(cd $(dirname $0) && pwd) +queries_path=${cwd}/../queries + +for file in "${queries_path}"/*.gsql; do + if [ -f "$file" ]; then + filename=$(basename "$file") + gsql ${queries_path}/${filename} + fi +done \ No newline at end of file diff --git a/tigergraph/scripts/install_queries.sh b/tigergraph/scripts/install_queries.sh new file mode 100644 index 0000000..618f9a9 --- /dev/null +++ b/tigergraph/scripts/install_queries.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +SINGLE_FLAG=${1:-false} + +if [ "$SINGLE_FLAG" = "true" ]; then + gsql -g ldbc_fin "install query -single all" +else + gsql -g ldbc_fin "install query -single trw2" + gsql -g ldbc_fin "set single_gpr=false install query all" +fi \ No newline at end of file diff --git a/tigergraph/scripts/load_data.sh b/tigergraph/scripts/load_data.sh new file mode 100644 index 0000000..0c781c7 --- /dev/null +++ b/tigergraph/scripts/load_data.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +DATA_SIZE=${1:-1} + +data_root="$(dirname "$0")/../data/sf${DATA_SIZE}/snapshot" + +gsql -g ldbc_fin " +RUN LOADING JOB load_fin_snapshot USING + file_Person=\"${data_root}/Person.csv\", + file_Account=\"${data_root}/Account.csv\", + file_Company=\"${data_root}/Company.csv\", + file_Loan=\"${data_root}/Loan.csv\", + file_Medium=\"${data_root}/Medium.csv\", + file_Company_Own_Account=\"${data_root}/CompanyOwnAccount.csv\", + file_Person_Own_Account=\"${data_root}/PersonOwnAccount.csv\", + file_Company_Invest=\"${data_root}/CompanyInvestCompany.csv\", + file_Person_Invest=\"${data_root}/PersonInvestCompany.csv\", + file_Person_Apply_Loan=\"${data_root}/PersonApplyLoan.csv\", + file_Company_Apply_Loan=\"${data_root}/CompanyApplyLoan.csv\", + 
file_Company_Guarantee=\"${data_root}/CompanyGuaranteeCompany.csv\", + file_Person_Guarantee=\"${data_root}/PersonGuaranteePerson.csv\", + file_transfer=\"${data_root}/AccountTransferAccount.csv\", + file_deposit=\"${data_root}/LoanDepositAccount.csv\", + file_repay=\"${data_root}/AccountRepayLoan.csv\", + file_withdraw=\"${data_root}/AccountWithdrawAccount.csv\", + file_signIn=\"${data_root}/MediumSignInAccount.csv\" +" \ No newline at end of file diff --git a/tigergraph/scripts/one_step_env.sh b/tigergraph/scripts/one_step_env.sh new file mode 100644 index 0000000..bd381cd --- /dev/null +++ b/tigergraph/scripts/one_step_env.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +DATA_SIZE=${1:-1} +SINGLE_FLAG=${1:-false} + +cwd=$(cd $(dirname $0) && pwd) +# 1.define schema and loading job +bash ${cwd}/setup_schema.sh + +# 2.load data into TigerGraph +bash ${cwd}/load_data.sh ${DATA_SIZE} + +# 3.create queries +bash ${cwd}/create_queries.sh + +# 4.install queries (default is single_GPR mode) +bash ${cwd}/install_queries.sh ${SINGLE_FLAG} \ No newline at end of file diff --git a/tigergraph/scripts/setup_schema.sh b/tigergraph/scripts/setup_schema.sh new file mode 100644 index 0000000..d67f210 --- /dev/null +++ b/tigergraph/scripts/setup_schema.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +cwd=$(cd $(dirname $0) && pwd) +cd $cwd + +function set_udf_put () { + gadmin config set GSQL.UDF.EnablePutTokenBank $1 + gadmin config set GSQL.UDF.EnablePutExpr $1 + gadmin config apply -y + gadmin restart -y --wait-online +} + +udfEnabled=$(gadmin config get GSQL.UDF.EnablePutExpr) +tokenBankEnabled=$(gadmin config get GSQL.UDF.EnablePutTokenBank) +if [[ $udfEnabled == "false" || $tokenBankEnabled == "false" ]]; then + set_udf_put true +fi + +gsql ' +DROP ALL + +CREATE VERTEX Person(PRIMARY_ID id UINT, name STRING, isBlocked BOOL, createTime UINT, gender STRING, birthday UINT, country STRING, city STRING)WITH primary_id_as_attribute="TRUE" +CREATE VERTEX Account(PRIMARY_ID id UINT, createTime UINT, isBlocked BOOL, accountType STRING, nickName STRING, phoneNumber STRING, email STRING, freqLoginType STRING, lastLoginTime UINT, accountLevel STRING) WITH primary_id_as_attribute="TRUE" +CREATE VERTEX Company(PRIMARY_ID id UINT, name STRING, isBlocked BOOL, createTime UINT, country STRING, city STRING, business STRING, description STRING, url STRING) WITH primary_id_as_attribute="TRUE" +CREATE VERTEX Loan(PRIMARY_ID id UINT, loanAmount DOUBLE, balance DOUBLE, usage STRING, interestRate FLOAT) WITH primary_id_as_attribute="TRUE" +CREATE VERTEX Medium(PRIMARY_ID id UINT, mediumType STRING, createTime UINT, isBlocked BOOL, lastLoginTime UINT, riskLevel STRING) WITH primary_id_as_attribute="TRUE" + +# single-edge +CREATE DIRECTED EDGE own(From Person|Company, To Account, timestamp UINT) WITH REVERSE_EDGE="own_REVERSE" +CREATE DIRECTED EDGE invest(From Person|Company, To Company, timestamp UINT, ratio FLOAT) WITH REVERSE_EDGE="invest_REVERSE" +CREATE DIRECTED EDGE apply(From Person|Company, To Loan, timestamp UINT, organization STRING) WITH REVERSE_EDGE="apply_REVERSE" +CREATE DIRECTED EDGE guarantee(FROM Person, TO Person | FROM Company, TO Company, timestamp UINT, relationship STRING) WITH REVERSE_EDGE="guarantee_REVERSE" + +# multi-edge +CREATE DIRECTED EDGE transfer(From Account, To Account, DISCRIMINATOR(timestamp UINT), orderNumber STRING, amount DOUBLE, comment STRING, payType STRING, goodsType STRING) WITH REVERSE_EDGE="transfer_REVERSE" +CREATE DIRECTED EDGE deposit(From Loan, To Account, DISCRIMINATOR(timestamp UINT), amount 
DOUBLE) WITH REVERSE_EDGE="deposit_REVERSE" +CREATE DIRECTED EDGE repay(From Account, To Loan, DISCRIMINATOR(timestamp UINT), amount DOUBLE) WITH REVERSE_EDGE="repay_REVERSE" +CREATE DIRECTED EDGE withdraw(From Account, To Account, DISCRIMINATOR(timestamp UINT), amount DOUBLE) WITH REVERSE_EDGE="withdraw_REVERSE" +CREATE DIRECTED EDGE signIn(From Medium, To Account, DISCRIMINATOR(timestamp UINT), location STRING) WITH REVERSE_EDGE="signIn_REVERSE" + +CREATE GRAPH ldbc_fin(*) +' + +gsql 'PUT ExprUtil FROM "./ExprFunctions.hpp"' +gsql 'PUT ExprFunctions FROM "./TokenBank.cpp"' + +gsql -g ldbc_fin ' +CREATE LOADING JOB load_fin_snapshot FOR GRAPH ldbc_fin{ + DEFINE FILENAME file_Person; + DEFINE FILENAME file_Account; + DEFINE FILENAME file_Company; + DEFINE FILENAME file_Loan; + DEFINE FILENAME file_Medium; + + DEFINE FILENAME file_Company_Own_Account; + DEFINE FILENAME file_Person_Own_Account; + DEFINE FILENAME file_Company_Invest; + DEFINE FILENAME file_Person_Invest; + DEFINE FILENAME file_Person_Apply_Loan; + DEFINE FILENAME file_Company_Apply_Loan; + DEFINE FILENAME file_Company_Guarantee; + DEFINE FILENAME file_Person_Guarantee; + + DEFINE FILENAME file_transfer; + DEFINE FILENAME file_deposit; + DEFINE FILENAME file_repay; + DEFINE FILENAME file_withdraw; + DEFINE FILENAME file_signIn; + + Load file_Person + TO VERTEX Person values($0, $1, $2, ToMiliSeconds($3), $4, ToMiliSeconds($5), $6, $7) USING header="true", separator="|"; + Load file_Account + TO VERTEX Account values($0, ToMiliSeconds($1), $2, $3, $4, $5, $6, $7, $8, $9) USING header="true", separator="|"; + Load file_Company + TO VERTEX Company values($0, $1, $2, ToMiliSeconds($3), $4, $5, $6, $7, $8) USING header="true", separator="|"; + Load file_Loan + TO VERTEX Loan values($0, $1, $2, $4, $5) USING header="true", separator="|"; + Load file_Medium + TO VERTEX Medium values($0, $1, ToMiliSeconds($3), $2, $4, $5) USING header="true", separator="|"; + + LOAD file_Company_Own_Account + TO EDGE own VALUES($0 Company, $1, ToMiliSeconds($2)) USING header="true", separator="|"; + LOAD file_Person_Own_Account + TO EDGE own VALUES($0 Person, $1, ToMiliSeconds($2)) USING header="true", separator="|"; + LOAD file_Company_Invest + TO EDGE invest VALUES($0 Company, $1, ToMiliSeconds($3), $2) USING header="true", separator="|"; + LOAD file_Person_Invest + TO EDGE invest VALUES($0 Person, $1, ToMiliSeconds($3), $2) USING header="true", separator="|"; + LOAD file_Person_Apply_Loan + TO EDGE apply VALUES($0 Person, $1, ToMiliSeconds($2), $3) USING header="true", separator="|"; + LOAD file_Company_Apply_Loan + TO EDGE apply VALUES($0 Company, $1, ToMiliSeconds($2), $3) USING header="true", separator="|"; + LOAD file_Company_Guarantee + TO EDGE guarantee VALUES($0 Company, $1 Company, ToMiliSeconds($2), $3) USING header="true", separator="|"; + LOAD file_Person_Guarantee + TO EDGE guarantee VALUES($0 Person, $1 Person, ToMiliSeconds($2), $3) USING header="true", separator="|"; + + LOAD file_transfer + TO EDGE transfer VALUES($0, $1, ToMiliSeconds($3), $4, $2, $5, $6, $7) USING header="true", separator="|"; + LOAD file_deposit + TO EDGE deposit VALUES($0, $1, ToMiliSeconds($3), $2) USING header="true", separator="|"; + LOAD file_repay + TO EDGE repay VALUES($0, $1, ToMiliSeconds($3), $2) USING header="true", separator="|"; + LOAD file_withdraw + TO EDGE withdraw VALUES($0, $1, ToMiliSeconds($3), $2) USING header="true", separator="|"; + LOAD file_signIn + TO EDGE signIn VALUES($0, $1, ToMiliSeconds($2), $3) USING header="true", separator="|"; 
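+
+    # Note: ToMiliSeconds is the token function defined in TokenBank.cpp. It converts the raw
+    # datetime strings in the FinBench CSVs (e.g. "2021-06-13 10:38:59" or "2022-03-06 03:47:51.72")
+    # into epoch milliseconds, so every createTime/timestamp attribute is stored as a UINT
+    # millisecond value that matches the millisecond parameters sent by the Java driver.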
+}' \ No newline at end of file diff --git a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java new file mode 100644 index 0000000..5945964 --- /dev/null +++ b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java @@ -0,0 +1,71 @@ +package org.ldbcouncil.finbench.impls.tigergraph; + +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.ldbcouncil.finbench.driver.DbConnectionState; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Map; + +public class TigerGraphDbConnectionState extends DbConnectionState { + static Logger logger = LogManager.getLogger("TigerGraphDbConnectionState"); + private static HikariDataSource dataSource; // Singleton instance + + private Connection conn; + + public TigerGraphDbConnectionState(Map properties) throws IOException { + if (dataSource == null) { + initializeDataSource(properties); + } + + try { + conn = dataSource.getConnection(); + } catch (SQLException e) { + throw new RuntimeException("Failed to get connection from the connection pool.", e); + } + } + + private void initializeDataSource(Map properties) { + String ipAddr = properties.get("ipAddr"); + String port = properties.get("port"); + String user = properties.get("user"); + String pass = properties.get("pass"); + String graph = properties.get("graph"); + + HikariConfig config = new HikariConfig(); + StringBuilder sb = new StringBuilder(); + sb.append("jdbc:tg:http://").append(ipAddr).append(":").append(port); + config.setJdbcUrl(sb.toString()); + config.setDriverClassName("com.tigergraph.jdbc.Driver"); + config.setUsername(user); + config.setPassword(pass); + config.addDataSourceProperty("graph", graph); + config.addDataSourceProperty("debug", 0); + + dataSource = new HikariDataSource(config); + } + + public Connection getConn() { + return conn; + } + + @Override + public void close() throws IOException { + try { + if (conn != null) { + conn.close(); + } + } catch (SQLException e) { + throw new IOException("Failed to close connection", e); + } + + // Close the data source after all connections have been closed + if (dataSource != null) { + dataSource.close(); + } + } +} \ No newline at end of file diff --git a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java new file mode 100644 index 0000000..a9acb9f --- /dev/null +++ b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java @@ -0,0 +1,1163 @@ +package org.ldbcouncil.finbench.impls.tigergraph; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.ldbcouncil.finbench.driver.*; +import org.ldbcouncil.finbench.driver.log.LoggingService; +import org.ldbcouncil.finbench.driver.result.Path; +import org.ldbcouncil.finbench.driver.truncation.TruncationOrder; +import org.ldbcouncil.finbench.driver.workloads.transaction.LdbcNoResult; +import org.ldbcouncil.finbench.driver.workloads.transaction.queries.*; +import org.ldbcouncil.finbench.impls.dummy.DummyDb; + +import java.io.IOException; +import java.math.BigDecimal; +import java.sql.*; +import 
java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; + +public class TigerGraphTransactionDb extends Db { + static Logger logger = LogManager.getLogger("TigerGraph"); + private static final int QUERY_TIMEOUT_SECONDS = 60; + + TigerGraphDbConnectionState dcs; + + @Override + protected void onInit(Map properties, LoggingService loggingService) throws DbException { + logger.info("TigerGraphTransactionDb initialized"); + + try { + dcs = new TigerGraphDbConnectionState(properties); + } catch (IOException e) { + e.printStackTrace(); + } + + // complex reads + registerOperationHandler(ComplexRead1.class, ComplexRead1Handler.class); + registerOperationHandler(ComplexRead2.class, ComplexRead2Handler.class); + registerOperationHandler(ComplexRead3.class, ComplexRead3Handler.class); + registerOperationHandler(ComplexRead4.class, ComplexRead4Handler.class); + registerOperationHandler(ComplexRead5.class, ComplexRead5Handler.class); + registerOperationHandler(ComplexRead6.class, ComplexRead6Handler.class); + registerOperationHandler(ComplexRead7.class, ComplexRead7Handler.class); + registerOperationHandler(ComplexRead8.class, ComplexRead8Handler.class); + registerOperationHandler(ComplexRead9.class, ComplexRead9Handler.class); + registerOperationHandler(ComplexRead10.class, ComplexRead10Handler.class); + registerOperationHandler(ComplexRead11.class, ComplexRead11Handler.class); + registerOperationHandler(ComplexRead12.class, ComplexRead12Handler.class); + + // simple reads + registerOperationHandler(SimpleRead1.class, SimpleRead1Handler.class); + registerOperationHandler(SimpleRead2.class, SimpleRead2Handler.class); + registerOperationHandler(SimpleRead3.class, SimpleRead3Handler.class); + registerOperationHandler(SimpleRead4.class, SimpleRead4Handler.class); + registerOperationHandler(SimpleRead5.class, SimpleRead5Handler.class); + registerOperationHandler(SimpleRead6.class, SimpleRead6Handler.class); + + // writes + registerOperationHandler(Write1.class, Write1Handler.class); + registerOperationHandler(Write2.class, Write2Handler.class); + registerOperationHandler(Write3.class, Write3Handler.class); + registerOperationHandler(Write4.class, Write4Handler.class); + registerOperationHandler(Write5.class, Write5Handler.class); + registerOperationHandler(Write6.class, Write6Handler.class); + registerOperationHandler(Write7.class, Write7Handler.class); + registerOperationHandler(Write8.class, Write8Handler.class); + registerOperationHandler(Write9.class, Write9Handler.class); + registerOperationHandler(Write10.class, Write10Handler.class); + registerOperationHandler(Write11.class, Write11Handler.class); + registerOperationHandler(Write12.class, Write12Handler.class); + registerOperationHandler(Write13.class, Write13Handler.class); + registerOperationHandler(Write14.class, Write14Handler.class); + registerOperationHandler(Write15.class, Write15Handler.class); + registerOperationHandler(Write16.class, Write16Handler.class); + registerOperationHandler(Write17.class, Write17Handler.class); + registerOperationHandler(Write18.class, Write18Handler.class); + registerOperationHandler(Write19.class, Write19Handler.class); + + // read-writes + registerOperationHandler(ReadWrite1.class, ReadWrite1Handler.class); + registerOperationHandler(ReadWrite2.class, ReadWrite2Handler.class); + registerOperationHandler(ReadWrite3.class, ReadWrite3Handler.class); + } + + @Override + protected void onClose() throws IOException { + logger.info("TigerGraphTransactionDb closed"); + dcs.close(); 
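+        // Note: dcs.close() above returns this handler's JDBC connection and, because the
+        // HikariCP data source is a shared static, also shuts down the pool, so it should
+        // only run once the driver has finished all operations.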
+ } + + @Override + protected DbConnectionState getConnectionState() throws DbException { + return dcs; + } + + private static double getRoundedDouble(ResultSet rs, String columnName) throws SQLException { + return rs.getBigDecimal(columnName).setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue(); + } + + public static class ComplexRead1Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead1 cr1, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr1(id=?, startTime=?, endTime=?, truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr1.getId()); + pstmt.setLong(2, cr1.getStartTime().getTime()); + pstmt.setLong(3, cr1.getEndTime().getTime()); + pstmt.setInt(4, cr1.getTruncationLimit()); + pstmt.setString(5, String.valueOf(cr1.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + + while (rs.next()) { + ComplexRead1Result rowResult = new ComplexRead1Result( + rs.getLong("otherId"), + rs.getInt("accountDistance"), + rs.getLong("mediumId"), + rs.getString("mediumType")); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr1); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead2Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead2 cr2, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr2(id=?, startTime=?, endTime=?, truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr2.getId()); + pstmt.setLong(2, cr2.getStartTime().getTime()); + pstmt.setLong(3, cr2.getEndTime().getTime()); + pstmt.setInt(4, cr2.getTruncationLimit()); + pstmt.setString(5, String.valueOf(cr2.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead2Result rowResult = new ComplexRead2Result( + rs.getLong("otherId"), + getRoundedDouble(rs,"sumLoanAmount"), + getRoundedDouble(rs,"sumLoanBalance") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr2); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + public static class ComplexRead3Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead3 cr3, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr3(id1=?, id2=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr3.getId1()); + pstmt.setLong(2, cr3.getId2()); + pstmt.setLong(3, cr3.getStartTime().getTime()); + pstmt.setLong(4, cr3.getEndTime().getTime()); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead3Result rowResult = new ComplexRead3Result(rs.getLong("shortestPathLength")); + results.add(rowResult); + } + + 
resultReporter.report(results.size(), results, cr3); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + public static class ComplexRead4Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead4 cr4, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr4(id1=?, id2=?, startTime=?, endTime=?)"; + long startTime = cr4.getStartTime().getTime(); + long endTime = cr4.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr4.getId1()); + pstmt.setLong(2, cr4.getId2()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead4Result rowResult = new ComplexRead4Result( + rs.getLong("otherId"), + rs.getLong("numEdge2"), + getRoundedDouble(rs, "sumEdge2Amount"), + getRoundedDouble(rs, "maxEdge2Amount"), + rs.getLong("numEdge3"), + getRoundedDouble(rs, "sumEdge3Amount"), + getRoundedDouble(rs, "maxEdge3Amount") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr4); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead5Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead5 cr5, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr5(id=?, startTime=?, endTime=?, truncationLimit=?, truncationOrder=?)"; + long startTime = cr5.getStartTime().getTime(); + long endTime = cr5.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr5.getId()); + pstmt.setLong(2, startTime); + pstmt.setLong(3, endTime); + pstmt.setInt(4, cr5.getTruncationLimit()); + pstmt.setString(5, String.valueOf(cr5.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + Array pathArray = rs.getArray("path"); + Object[] arrayContent = (Object[]) pathArray.getArray(); + for (Object element : arrayContent) { + String[] numberStrings = element.toString().split("->"); + long[] longArray = new long[numberStrings.length]; + for (int i = 0; i < numberStrings.length; i++) { + longArray[i] = Long.parseLong(numberStrings[i]); + } + Path path = new Path(); + for (long num : longArray) { + path.addId(num); + } + ComplexRead5Result result = new ComplexRead5Result(path); + results.add(result); + } + } + resultReporter.report(results.size(), results, cr5); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + public static class ComplexRead6Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead6 cr6, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr6(accountId=?, threshold1=?, threshold2=?, startTime=?, endTime=?, " + + "truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr6.getId()); + 
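+                // The remaining parameters bind the two amount thresholds, the time window and the
+                // truncation settings in the same positional order as the tcr6 signature above.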
pstmt.setDouble(2, cr6.getThreshold1()); + pstmt.setDouble(3, cr6.getThreshold2()); + pstmt.setLong(4, cr6.getStartTime().getTime()); + pstmt.setLong(5, cr6.getEndTime().getTime()); + pstmt.setInt(6, cr6.getTruncationLimit()); + pstmt.setString(7, String.valueOf(cr6.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead6Result rowResult = new ComplexRead6Result( + rs.getLong("midId"), + getRoundedDouble(rs, "sumEdge1Amount"), + getRoundedDouble(rs, "sumEdge2Amount") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr6); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead7Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead7 cr7, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr7(accountId=?, threshold=?, startTime=?, endTime=?, " + + "truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr7.getId()); + pstmt.setDouble(2, cr7.getThreshold()); + pstmt.setLong(3, cr7.getStartTime().getTime()); + pstmt.setLong(4, cr7.getEndTime().getTime()); + pstmt.setInt(5, cr7.getTruncationLimit()); + pstmt.setString(6, String.valueOf(cr7.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead7Result rowResult = new ComplexRead7Result( + rs.getInt("numSrc"), + rs.getInt("numDst"), + rs.getFloat("inOutRatio")); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr7); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead8Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead8 cr8, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr8(loanId=?, threshold=?, startTime=?, endTime=?, " + + "truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr8.getId()); + pstmt.setDouble(2, cr8.getThreshold()); + pstmt.setLong(3, cr8.getStartTime().getTime()); + pstmt.setLong(4, cr8.getEndTime().getTime()); + pstmt.setInt(5, cr8.getTruncationLimit()); + pstmt.setString(6, String.valueOf(cr8.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead8Result rowResult = new ComplexRead8Result( + rs.getLong("dstId"), + rs.getFloat("final_ratio"), + rs.getInt("distanceFromLoan")); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr8); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead9Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead9 cr9, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr9(id=?, threshold=?, startTime=?, endTime=?, truncationLimit=?, 
truncationOrder=?)"; + long startTime = cr9.getStartTime().getTime(); + long endTime = cr9.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr9.getId()); + pstmt.setDouble(2, cr9.getThreshold()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + pstmt.setInt(5, cr9.getTruncationLimit()); + pstmt.setString(6, String.valueOf(cr9.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead9Result rowResult = new ComplexRead9Result( + rs.getFloat("ratioRepay"), + rs.getFloat("ratioDeposit"), + rs.getFloat("ratioTransfer")); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr9); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead10Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead10 cr10, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr10(pid1=?, pid2=?, startTime=?, endTime=?)"; + long startTime = cr10.getStartTime().getTime(); + long endTime = cr10.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr10.getPid1()); + pstmt.setLong(2, cr10.getPid2()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead10Result rowResult = new ComplexRead10Result( + rs.getFloat("jaccardSimilarity") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr10); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead11Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead11 cr11, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr11(id=?, startTime=?, endTime=?, truncationLimit=?, truncationOrder=?)"; + long startTime = cr11.getStartTime().getTime(); + long endTime = cr11.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr11.getId()); + pstmt.setLong(2, startTime); + pstmt.setLong(3, endTime); + pstmt.setInt(4, cr11.getTruncationLimit()); + pstmt.setString(5, String.valueOf(cr11.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead11Result rowResult = new ComplexRead11Result( + getRoundedDouble(rs, "sumLoanAmount"), + rs.getInt("numLoans") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr11); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ComplexRead12Handler implements OperationHandler { + @Override + public void executeOperation(ComplexRead12 cr12, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tcr12(id=?, startTime=?, endTime=?, truncationLimit=?, 
truncationOrder=?)"; + long startTime = cr12.getStartTime().getTime(); + long endTime = cr12.getEndTime().getTime(); + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, cr12.getId()); + pstmt.setLong(2, startTime); + pstmt.setLong(3, endTime); + pstmt.setInt(4, cr12.getTruncationLimit()); + pstmt.setString(5, String.valueOf(cr12.getTruncationOrder())); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + ComplexRead12Result rowResult = new ComplexRead12Result( + rs.getLong("compAccountId"), + getRoundedDouble(rs, "sumEdge2Amount") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, cr12); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead1Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead1 sr1, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr1(id=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr1.getId()); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + SimpleRead1Result rowResult = new SimpleRead1Result( + new Date(rs.getLong("createTime")), + rs.getBoolean("isBlocked"), + rs.getString("accountType")); + results.add(rowResult); + } + resultReporter.report(results.size(), results, sr1); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead2Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead2 sr2, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr2(id=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + long startTime = sr2.getStartTime().getTime(); + long endTime = sr2.getEndTime().getTime(); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr2.getId()); + pstmt.setLong(2, startTime); + pstmt.setLong(3, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + SimpleRead2Result rowResult = new SimpleRead2Result( + getRoundedDouble(rs, "sumEdge1Amount"), + getRoundedDouble(rs, "maxEdge1Amount"), + rs.getLong("numEdge1"), + getRoundedDouble(rs, "sumEdge2Amount"), + getRoundedDouble(rs, "maxEdge2Amount"), + rs.getLong("numEdge2") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, sr2); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead3Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead3 sr3, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr3(dstId=?, threshold=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + long startTime = sr3.getStartTime().getTime(); + long endTime = sr3.getEndTime().getTime(); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr3.getId()); + pstmt.setDouble(2, 
sr3.getThreshold()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + SimpleRead3Result rowResult = new SimpleRead3Result( + rs.getFloat("blockRatio") + ); + results.add(rowResult); + } + resultReporter.report(results.size(), results, sr3); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead4Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead4 sr4, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr4(dstAccountId=?, threshold=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + long startTime = sr4.getStartTime().getTime(); + long endTime = sr4.getEndTime().getTime(); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr4.getId()); + pstmt.setDouble(2, sr4.getThreshold()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + if (rs.getObject("@@result") == null) { + SimpleRead4Result rowResult = new SimpleRead4Result( + rs.getLong("dstId"), + rs.getInt("numEdges"), + getRoundedDouble(rs, "sumAmount") + ); + results.add(rowResult); + } + } + resultReporter.report(results.size(), results, sr4); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead5Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead5 sr5, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr5(id=?, threshold=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + long startTime = sr5.getStartTime().getTime(); + long endTime = sr5.getEndTime().getTime(); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr5.getId()); + pstmt.setDouble(2, sr5.getThreshold()); + pstmt.setLong(3, startTime); + pstmt.setLong(4, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + if (rs.getObject("Nodes") == null) { + SimpleRead5Result rowResult = new SimpleRead5Result( + rs.getLong("srcId"), + rs.getInt("numEdges"), + getRoundedDouble(rs, "sumAmount") + ); + results.add(rowResult); + } + } + resultReporter.report(results.size(), results, sr5); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class SimpleRead6Handler implements OperationHandler { + @Override + public void executeOperation(SimpleRead6 sr6, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tsr6(id=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + long startTime = sr6.getStartTime().getTime(); + long endTime = sr6.getEndTime().getTime(); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, sr6.getId()); + pstmt.setLong(2, startTime); + pstmt.setLong(3, endTime); + + ResultSet rs = pstmt.executeQuery(); + List results = new ArrayList<>(); + while (rs.next()) { + if (rs.getObject("Nodes") == 
null) { + SimpleRead6Result rowResult = new SimpleRead6Result( + rs.getLong("dstId") + ); + results.add(rowResult); + } + } + resultReporter.report(results.size(), results, sr6); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write1Handler implements OperationHandler { + @Override + public void executeOperation(Write1 w1, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw1(personId=?, personName=?, isBlocked=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w1.getPersonId()); + pstmt.setString(2, w1.getPersonName()); + pstmt.setBoolean(3, w1.getIsBlocked()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w1); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write2Handler implements OperationHandler { + @Override + public void executeOperation(Write2 w2, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw2(companyId=?, companyName=?, isBlocked=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w2.getCompanyId()); + pstmt.setString(2, w2.getCompanyName()); + pstmt.setBoolean(3, w2.getIsBlocked()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w2); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write3Handler implements OperationHandler { + @Override + public void executeOperation(Write3 w3, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw3(mediumId=?, mediumType=?, isBlocked=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w3.getMediumId()); + pstmt.setString(2, w3.getMediumType()); + pstmt.setBoolean(3, w3.getIsBlocked()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w3); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write4Handler implements OperationHandler { + @Override + public void executeOperation(Write4 w4, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw4(personId=?, accountId=?, time=?, accountBlocked=?, accountType=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w4.getPersonId()); + pstmt.setLong(2, w4.getAccountId()); + pstmt.setLong(3, w4.getTime().getTime()); + pstmt.setBoolean(4, w4.getAccountBlocked()); + pstmt.setString(5, w4.getAccountType()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w4); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write5Handler implements OperationHandler { + @Override + public void executeOperation(Write5 w5, TigerGraphDbConnectionState dbConnectionState, + 
ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw5(companyId=?, accountId=?, time=?, accountBlocked=?, accountType=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w5.getCompanyId()); + pstmt.setLong(2, w5.getAccountId()); + pstmt.setLong(3, w5.getTime().getTime()); + pstmt.setBoolean(4, w5.getAccountBlocked()); + pstmt.setString(5, w5.getAccountType()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w5); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write6Handler implements OperationHandler { + @Override + public void executeOperation(Write6 w6, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw6(personId=?, loanId=?, loanAmount=?, balance=?, time=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w6.getPersonId()); + pstmt.setLong(2, w6.getLoanId()); + pstmt.setDouble(3, w6.getLoanAmount()); + pstmt.setDouble(4, w6.getBalance()); + pstmt.setLong(5, w6.getTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w6); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write7Handler implements OperationHandler { + @Override + public void executeOperation(Write7 w7, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw7(companyId=?, loanId=?, loanAmount=?, balance=?, time=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w7.getCompanyId()); + pstmt.setLong(2, w7.getLoanId()); + pstmt.setDouble(3, w7.getLoanAmount()); + pstmt.setDouble(4, w7.getBalance()); + pstmt.setLong(5, w7.getTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w7); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write8Handler implements OperationHandler { + @Override + public void executeOperation(Write8 w8, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw8(personId=?, companyId=?, time=?, ratio=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w8.getPersonId()); + pstmt.setLong(2, w8.getCompanyId()); + pstmt.setLong(3, w8.getTime().getTime()); + pstmt.setDouble(4, w8.getRatio()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w8); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write9Handler implements OperationHandler { + @Override + public void executeOperation(Write9 w9, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw9(companyId1=?, companyId2=?, time=?, ratio=?)"; + PreparedStatement pstmt = 
conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w9.getCompanyId1()); + pstmt.setLong(2, w9.getCompanyId2()); + pstmt.setLong(3, w9.getTime().getTime()); + pstmt.setDouble(4, w9.getRatio()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w9); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write10Handler implements OperationHandler { + @Override + public void executeOperation(Write10 w10, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw10(personId1=?, personId2=?, time=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w10.getPersonId1()); + pstmt.setLong(2, w10.getPersonId2()); + pstmt.setLong(3, w10.getTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w10); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write11Handler implements OperationHandler { + @Override + public void executeOperation(Write11 w11, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw11(companyId1=?, companyId2=?, time=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w11.getCompanyId1()); + pstmt.setLong(2, w11.getCompanyId2()); + pstmt.setLong(3, w11.getTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w11); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write12Handler implements OperationHandler { + @Override + public void executeOperation(Write12 w12, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw12(accountId1=?, accountId2=?, time=?, amount=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w12.getAccountId1()); + pstmt.setLong(2, w12.getAccountId2()); + pstmt.setLong(3, w12.getTime().getTime()); + pstmt.setDouble(4, w12.getAmount()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w12); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write13Handler implements OperationHandler { + @Override + public void executeOperation(Write13 w13, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw13(accountId1=?, accountId2=?, time=?, amount=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w13.getAccountId1()); + pstmt.setLong(2, w13.getAccountId2()); + pstmt.setLong(3, w13.getTime().getTime()); + pstmt.setDouble(4, w13.getAmount()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w13); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write14Handler 
implements OperationHandler { + @Override + public void executeOperation(Write14 w14, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw14(accountId=?, loanId=?, time=?, amount=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w14.getAccountId()); + pstmt.setLong(2, w14.getLoanId()); + pstmt.setLong(3, w14.getTime().getTime()); + pstmt.setDouble(4, w14.getAmount()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w14); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write15Handler implements OperationHandler { + @Override + public void executeOperation(Write15 w15, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw15(loanId=?, accountId=?, time=?, amount=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w15.getLoanId()); + pstmt.setLong(2, w15.getAccountId()); + pstmt.setLong(3, w15.getTime().getTime()); + pstmt.setDouble(4, w15.getAmount()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w15); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write16Handler implements OperationHandler { + @Override + public void executeOperation(Write16 w16, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw16(mediumId=?, accountId=?, time=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w16.getMediumId()); + pstmt.setLong(2, w16.getAccountId()); + pstmt.setLong(3, w16.getTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w16); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write17Handler implements OperationHandler { + @Override + public void executeOperation(Write17 w17, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw17(accountId=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w17.getAccountId()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w17); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class Write18Handler implements OperationHandler { + @Override + public void executeOperation(Write18 w18, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw18(accountId=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w18.getAccountId()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w18); + } catch (SQLException e) { + logger.error("Failed to 
createStatement: " + e); + } + } + } + + public static class Write19Handler implements OperationHandler { + @Override + public void executeOperation(Write19 w19, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN tw19(personId=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, w19.getPersonId()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, w19); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ReadWrite1Handler implements OperationHandler { + @Override + public void executeOperation(ReadWrite1 rw1, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN trw1(srcId=?, dstId=?, time=?, amount=?, startTime=?, endTime=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, rw1.getSrcId()); + pstmt.setLong(2, rw1.getDstId()); + pstmt.setLong(3, rw1.getTime().getTime()); + pstmt.setDouble(4, rw1.getAmount()); + pstmt.setLong(5, rw1.getStartTime().getTime()); + pstmt.setLong(6, rw1.getEndTime().getTime()); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, rw1); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ReadWrite2Handler implements OperationHandler { + @Override + public void executeOperation(ReadWrite2 rw2, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN trw2(srcId=?, dstId=?, time=?, startTime=?, endTime=?, " + + "amount=?, amountThreshold=?, ratioThreshold=?, " + + "truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, rw2.getSrcId()); + pstmt.setLong(2, rw2.getDstId()); + pstmt.setLong(3, rw2.getTime().getTime()); + pstmt.setLong(4, rw2.getStartTime().getTime()); + pstmt.setLong(5, rw2.getEndTime().getTime()); + pstmt.setDouble(6, rw2.getAmount()); + pstmt.setDouble(7, rw2.getAmountThreshold()); + pstmt.setDouble(8, rw2.getRatioThreshold()); + pstmt.setInt(9, rw2.getTruncationLimit()); + pstmt.setString(10, String.valueOf(rw2.getTruncationOrder())); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, rw2); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } + + public static class ReadWrite3Handler implements OperationHandler { + @Override + public void executeOperation(ReadWrite3 rw3, TigerGraphDbConnectionState dbConnectionState, + ResultReporter resultReporter) throws DbException { + try { + Connection conn = dbConnectionState.getConn(); + String query = "RUN trw3(srcId=?, dstId=?, time=?, threshold=?, startTime=?, endTime=?, " + + "truncationLimit=?, truncationOrder=?)"; + PreparedStatement pstmt = conn.prepareStatement(query); + pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); + pstmt.setLong(1, rw3.getSrcId()); + pstmt.setLong(2, rw3.getDstId()); + pstmt.setLong(3, rw3.getTime().getTime()); + pstmt.setDouble(4, rw3.getThreshold()); + pstmt.setLong(5, rw3.getStartTime().getTime()); + pstmt.setLong(6, 
rw3.getEndTime().getTime()); + pstmt.setInt(7, rw3.getTruncationLimit()); + pstmt.setString(8, String.valueOf(rw3.getTruncationOrder())); + pstmt.executeQuery(); + resultReporter.report(0, LdbcNoResult.INSTANCE, rw3); + } catch (SQLException e) { + logger.error("Failed to createStatement: " + e); + } + } + } +} \ No newline at end of file diff --git a/tigergraph/validate_database.properties b/tigergraph/validate_database.properties new file mode 100644 index 0000000..079a5f0 --- /dev/null +++ b/tigergraph/validate_database.properties @@ -0,0 +1,93 @@ +############################################################ +# SUT defined configurations # +############################################################ +ipAddr=34.41.176.245 +port=14240 +user=tigergraph +pass=tigergraph +graph=ldbc_fin + +############################################################ +# Driver configurations # +############################################################ +status=1 +thread_count=16 +name=LDBC-FinBench +# Modes available: 1.CREATE_VALIDATION 2.VALIDATE_DATABASE 3.EXECUTE_BENCHMARK +mode=VALIDATE_DATABASE +results_log=false +time_unit=MICROSECONDS +time_compression_ratio=1 +peer_identifiers= +workload_statistics=false +spinner_wait_duration=1 +help=false +ignore_scheduled_start_times=false +workload=org.ldbcouncil.finbench.driver.workloads.transaction.LdbcFinBenchTransactionWorkload +db=org.ldbcouncil.finbench.impls.tigergraph.TigerGraphTransactionDb +operation_count=1000000 +validation_parameters_size=1000 +validate_workload=true +validate_database=validation_params.csv +warmup=0 +ldbc.finbench.transaction.queries.parameters_dir=data/sf1/read_params +ldbc.finbench.transaction.queries.updates_dir=data/sf1/incremental +# param and update files suffix, `csv` or `parquet`, default is `csv` +ldbc.finbench.transaction.queries.files_suffix=csv +ldbc.finbench.transaction.queries.simple_read_dissipation=0.2 +ldbc.finbench.transaction.queries.update_interleave=2147483647 +ldbc.finbench.transaction.queries.scale_factor=1 +# Frequency of complex read queries +ldbc.finbench.transaction.queries.ComplexRead1_freq=1 +ldbc.finbench.transaction.queries.ComplexRead2_freq=1 +ldbc.finbench.transaction.queries.ComplexRead3_freq=1 +ldbc.finbench.transaction.queries.ComplexRead4_freq=1 +ldbc.finbench.transaction.queries.ComplexRead5_freq=1 +ldbc.finbench.transaction.queries.ComplexRead6_freq=1 +ldbc.finbench.transaction.queries.ComplexRead7_freq=1 +ldbc.finbench.transaction.queries.ComplexRead8_freq=1 +ldbc.finbench.transaction.queries.ComplexRead9_freq=1 +ldbc.finbench.transaction.queries.ComplexRead10_freq=1 +ldbc.finbench.transaction.queries.ComplexRead11_freq=1 +ldbc.finbench.transaction.queries.ComplexRead12_freq=1 +# For debugging purposes +ldbc.finbench.transaction.queries.ComplexRead1_enable=true +ldbc.finbench.transaction.queries.ComplexRead2_enable=true +ldbc.finbench.transaction.queries.ComplexRead3_enable=true +ldbc.finbench.transaction.queries.ComplexRead4_enable=true +ldbc.finbench.transaction.queries.ComplexRead5_enable=true +ldbc.finbench.transaction.queries.ComplexRead6_enable=true +ldbc.finbench.transaction.queries.ComplexRead7_enable=true +ldbc.finbench.transaction.queries.ComplexRead8_enable=true +ldbc.finbench.transaction.queries.ComplexRead9_enable=true +ldbc.finbench.transaction.queries.ComplexRead10_enable=true +ldbc.finbench.transaction.queries.ComplexRead11_enable=true +ldbc.finbench.transaction.queries.ComplexRead12_enable=true +ldbc.finbench.transaction.queries.SimpleRead1_enable=true 
+ldbc.finbench.transaction.queries.SimpleRead2_enable=true +ldbc.finbench.transaction.queries.SimpleRead3_enable=true +ldbc.finbench.transaction.queries.SimpleRead4_enable=true +ldbc.finbench.transaction.queries.SimpleRead5_enable=true +ldbc.finbench.transaction.queries.SimpleRead6_enable=true +ldbc.finbench.transaction.queries.Write1_enable=true +ldbc.finbench.transaction.queries.Write2_enable=true +ldbc.finbench.transaction.queries.Write3_enable=true +ldbc.finbench.transaction.queries.Write4_enable=true +ldbc.finbench.transaction.queries.Write5_enable=true +ldbc.finbench.transaction.queries.Write6_enable=true +ldbc.finbench.transaction.queries.Write7_enable=true +ldbc.finbench.transaction.queries.Write8_enable=true +ldbc.finbench.transaction.queries.Write9_enable=true +ldbc.finbench.transaction.queries.Write10_enable=true +ldbc.finbench.transaction.queries.Write11_enable=true +ldbc.finbench.transaction.queries.Write12_enable=true +ldbc.finbench.transaction.queries.Write13_enable=true +ldbc.finbench.transaction.queries.Write14_enable=true +ldbc.finbench.transaction.queries.Write15_enable=true +ldbc.finbench.transaction.queries.Write16_enable=true +ldbc.finbench.transaction.queries.Write17_enable=true +ldbc.finbench.transaction.queries.Write18_enable=true +ldbc.finbench.transaction.queries.Write19_enable=true +ldbc.finbench.transaction.queries.ReadWrite1_enable=true +ldbc.finbench.transaction.queries.ReadWrite2_enable=true +ldbc.finbench.transaction.queries.ReadWrite3_enable=true From c419f2fa12aed9548d2ff92765c3b5184345f2c8 Mon Sep 17 00:00:00 2001 From: Yiping Xiong Date: Fri, 15 Sep 2023 11:11:26 +0800 Subject: [PATCH 2/9] modify README.md --- tigergraph/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tigergraph/README.md b/tigergraph/README.md index e8efb15..0b96a4e 100644 --- a/tigergraph/README.md +++ b/tigergraph/README.md @@ -36,7 +36,7 @@ mvn clean package ``` ## 2. DDL & Loading & Queries -Please make sure that you already installed TigerGraph, and logged in as the user can run TigerGraph. +Ensure that you have already installed TigerGraph and authenticated as a user with the necessary privileges to run TigerGraph. ### 2.1 Create schema and loading job ```bash bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/setup_schema.sh @@ -58,6 +58,7 @@ $ tree -L 2 ~/ldbc_finbench_transaction_impls/tigergraph/data ├── raw ├── read_params └── snapshot + ... ``` #### 2.2.2 Data Loading @@ -72,6 +73,7 @@ bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/install_queries.sh ``` ### 2.4 one-step setup +If you prefer to avoid separately performing steps 2.1, 2.2, and 2.3, you can use the convenient ```one_step_env.sh``` script, which automates the setup process of creating schemas, loading data, and preparing queries. 
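For orientation, here is a minimal, hypothetical sketch of what such a one-step script could look like, assuming it simply chains the individual scripts from steps 2.1 through 2.3 in order; the authoritative logic lives in `scripts/one_step_env.sh`, which is invoked as shown in the snippet that follows this sketch.

```bash
#!/bin/bash
# Hypothetical sketch only -- see scripts/one_step_env.sh for the real implementation.
set -e
SCRIPT_DIR=~/ldbc_finbench_transaction_impls/tigergraph/scripts

bash "${SCRIPT_DIR}/setup_schema.sh"    # 2.1: create the schema and define the loading job
bash "${SCRIPT_DIR}/load_data.sh"       # 2.2: load the snapshot data into ldbc_fin
bash "${SCRIPT_DIR}/create_queries.sh"  # 2.3: create the GSQL queries
bash "${SCRIPT_DIR}/install_queries.sh" # 2.3: install the GSQL queries
```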
```bash bash ~/ldbc_finbench_transaction_impls/tigergraph/scripts/one_step_env.sh ``` From 18c62039306f875b3cb60747ec0cceb57558f18d Mon Sep 17 00:00:00 2001 From: Yiping Xiong Date: Fri, 15 Sep 2023 11:46:29 +0800 Subject: [PATCH 3/9] modify properties file --- tigergraph/validate_database.properties | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tigergraph/validate_database.properties b/tigergraph/validate_database.properties index 079a5f0..ac6d138 100644 --- a/tigergraph/validate_database.properties +++ b/tigergraph/validate_database.properties @@ -26,7 +26,7 @@ ignore_scheduled_start_times=false workload=org.ldbcouncil.finbench.driver.workloads.transaction.LdbcFinBenchTransactionWorkload db=org.ldbcouncil.finbench.impls.tigergraph.TigerGraphTransactionDb operation_count=1000000 -validation_parameters_size=1000 +validation_parameters_size=10000 validate_workload=true validate_database=validation_params.csv warmup=0 From 22ce467b86fd802543f0ff39826fc5d809f87ac6 Mon Sep 17 00:00:00 2001 From: Yiping Xiong Date: Tue, 19 Sep 2023 17:58:23 +0800 Subject: [PATCH 4/9] modify trw2.gsql --- tigergraph/queries/trw2.gsql | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tigergraph/queries/trw2.gsql b/tigergraph/queries/trw2.gsql index 3e30e32..543148b 100644 --- a/tigergraph/queries/trw2.gsql +++ b/tigergraph/queries/trw2.gsql @@ -19,8 +19,6 @@ CREATE OR REPLACE QUERY trw2( RETURN; END; - INSERT INTO transfer VALUES (srcId, dstId, time, _, amount, _, _, _); - transfersIn = SELECT t FROM Nodes:t - (transfer_REVERSE:e1) -> Account:s WHERE e1.amount > amountThreshold AND e1.timestamp > startTime @@ -38,6 +36,10 @@ CREATE OR REPLACE QUERY trw2( Nodes = SELECT s FROM Nodes:s POST-ACCUM - s.isBlocked = (s.@outCount > 0 AND s.@totalE1Amount/s.@totalE2Amount <= ratioThreshold) OR (s.@outCount == 0 AND ratioThreshold >= -1) + IF (s.@outCount > 0 AND s.@totalE1Amount/s.@totalE2Amount > ratioThreshold) THEN + INSERT INTO transfer VALUES (srcId, dstId, time, _, amount, _, _, _) + ELSE + s.isBlocked = true + END ; } \ No newline at end of file From 64ac6b392ecebd21d1dc719ffb83c3265e283de8 Mon Sep 17 00:00:00 2001 From: Yongchao Liu <371063912@qq.com> Date: Wed, 20 Sep 2023 16:32:57 +0800 Subject: [PATCH 5/9] using db connection pool instead of reuse one connection --- tigergraph/benchmark.properties | 1 + tigergraph/create_validation.properties | 1 + tigergraph/queries/tcr1.gsql | 33 +- .../TigerGraphDbConnectionState.java | 22 +- .../tigergraph/TigerGraphTransactionDb.java | 399 +++++++++++++----- tigergraph/validate_database.properties | 1 + 6 files changed, 323 insertions(+), 134 deletions(-) diff --git a/tigergraph/benchmark.properties b/tigergraph/benchmark.properties index ac9d6ec..c93a114 100644 --- a/tigergraph/benchmark.properties +++ b/tigergraph/benchmark.properties @@ -6,6 +6,7 @@ port=14240 user=tigergraph pass=tigergraph graph=ldbc_fin +maxPoolSize=160 ############################################################ # Driver configurations # diff --git a/tigergraph/create_validation.properties b/tigergraph/create_validation.properties index 913cc9b..445ca9c 100644 --- a/tigergraph/create_validation.properties +++ b/tigergraph/create_validation.properties @@ -6,6 +6,7 @@ port=14240 user=tigergraph pass=tigergraph graph=ldbc_fin +maxPoolSize=160 ############################################################ # Driver configurations # diff --git a/tigergraph/queries/tcr1.gsql b/tigergraph/queries/tcr1.gsql index b3f5917..e7f6104 100644 --- 
a/tigergraph/queries/tcr1.gsql +++ b/tigergraph/queries/tcr1.gsql @@ -1,32 +1,31 @@ -USE GRAPH ldbc_fin CREATE or REPLACE QUERY tcr1( - VERTEX id, + VERTEX id, UINT startTime, UINT endTime, - INT truncationLimit, + INT truncationLimit, STRING truncationOrder ) FOR GRAPH ldbc_fin syntax v1{ TYPEDEF TUPLE RESULT; SetAccum @result_set; HeapAccum(accountDistance ASC, otherId ASC, mediumId ASC) @@result; - MapAccum> @min_time_map; + ArrayAccum> @min_times[4]; StepNodes = {id}; - TMP = SELECT s from StepNodes:s POST-ACCUM s.@min_time_map += (0 -> 0); - + TMP = SELECT s from StepNodes:s POST-ACCUM s.@min_times[0] += 0; + INT i = 1; - + WHILE StepNodes.size() > 0 and i <= 3 DO - StepNodes = - SELECT t + StepNodes = + SELECT FROM StepNodes:s - (transfer:e) -> Account:t WHERE e.timestamp > startTime and e.timestamp < endTime - and e.timestamp > s.@min_time_map.get(i-1) - ACCUM t.@min_time_map += (i -> e.timestamp); + and e.timestamp > s.@min_times[i-1] + ACCUM t.@min_times[i] += e.timestamp; - R = + R = SELECT t FROM StepNodes:s - (signIn_REVERSE:e) -> Medium:t WHERE e.timestamp > startTime and e.timestamp < endTime and t.isBlocked == true @@ -37,10 +36,10 @@ CREATE or REPLACE QUERY tcr1( END, s.@result_set.clear() ; - - i = i + 1; - + + i = i + 1 + END; - - PRINT @@result; + + PRINT @@result; } \ No newline at end of file diff --git a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java index 5945964..abc904c 100644 --- a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java +++ b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphDbConnectionState.java @@ -15,18 +15,10 @@ public class TigerGraphDbConnectionState extends DbConnectionState { static Logger logger = LogManager.getLogger("TigerGraphDbConnectionState"); private static HikariDataSource dataSource; // Singleton instance - private Connection conn; - public TigerGraphDbConnectionState(Map properties) throws IOException { if (dataSource == null) { initializeDataSource(properties); } - - try { - conn = dataSource.getConnection(); - } catch (SQLException e) { - throw new RuntimeException("Failed to get connection from the connection pool.", e); - } } private void initializeDataSource(Map properties) { @@ -35,6 +27,7 @@ private void initializeDataSource(Map properties) { String user = properties.get("user"); String pass = properties.get("pass"); String graph = properties.get("graph"); + int maxPoolSize = Integer.parseInt(properties.get("maxPoolSize")); HikariConfig config = new HikariConfig(); StringBuilder sb = new StringBuilder(); @@ -45,24 +38,17 @@ private void initializeDataSource(Map properties) { config.setPassword(pass); config.addDataSourceProperty("graph", graph); config.addDataSourceProperty("debug", 0); + config.setMaximumPoolSize(maxPoolSize); dataSource = new HikariDataSource(config); } - public Connection getConn() { - return conn; + public Connection getPooledConn() throws SQLException { + return dataSource.getConnection(); } @Override public void close() throws IOException { - try { - if (conn != null) { - conn.close(); - } - } catch (SQLException e) { - throw new IOException("Failed to close connection", e); - } - // Close the data source after all connections have been closed if (dataSource != null) { dataSource.close(); diff --git 
a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java index a9acb9f..df40fc1 100644 --- a/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java +++ b/tigergraph/src/main/java/org/ldbcouncil/finbench/impls/tigergraph/TigerGraphTransactionDb.java @@ -98,22 +98,48 @@ private static double getRoundedDouble(ResultSet rs, String columnName) throws S return rs.getBigDecimal(columnName).setScale(3, BigDecimal.ROUND_HALF_UP).doubleValue(); } + private static void closeResources(Connection conn, PreparedStatement pstmt, ResultSet rs) { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + if (pstmt != null) { + try { + pstmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + } + public static class ComplexRead1Handler implements OperationHandler { @Override public void executeOperation(ComplexRead1 cr1, TigerGraphDbConnectionState dbConnectionState, ResultReporter resultReporter) throws DbException { + Connection conn = null; + PreparedStatement pstmt = null; + ResultSet rs = null; try { - Connection conn = dbConnectionState.getConn(); + conn = dbConnectionState.getPooledConn(); String query = "RUN tcr1(id=?, startTime=?, endTime=?, truncationLimit=?, truncationOrder=?)"; - PreparedStatement pstmt = conn.prepareStatement(query); + pstmt = conn.prepareStatement(query); pstmt.setQueryTimeout(QUERY_TIMEOUT_SECONDS); pstmt.setLong(1, cr1.getId()); pstmt.setLong(2, cr1.getStartTime().getTime()); pstmt.setLong(3, cr1.getEndTime().getTime()); pstmt.setInt(4, cr1.getTruncationLimit()); pstmt.setString(5, String.valueOf(cr1.getTruncationOrder())); - - ResultSet rs = pstmt.executeQuery(); + rs = pstmt.executeQuery(); List results = new ArrayList<>(); while (rs.next()) { @@ -127,6 +153,8 @@ public void executeOperation(ComplexRead1 cr1, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr1); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -135,10 +163,13 @@ public static class ComplexRead2Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead2Result rowResult = new ComplexRead2Result( @@ -159,6 +190,8 @@ public void executeOperation(ComplexRead2 cr2, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr2); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -166,17 +199,20 @@ public static class ComplexRead3Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead3Result rowResult = new ComplexRead3Result(rs.getLong("shortestPathLength")); @@ -186,6 +222,8 @@ public void executeOperation(ComplexRead3 cr3, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr3); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -193,19 +231,22 @@ public static class ComplexRead4Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead4Result rowResult = new ComplexRead4Result( @@ -222,6 +263,8 @@ public void 
executeOperation(ComplexRead4 cr4, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr4); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -230,12 +273,15 @@ public static class ComplexRead5Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { Array pathArray = rs.getArray("path"); @@ -265,6 +311,8 @@ public void executeOperation(ComplexRead5 cr5, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr5); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -272,11 +320,14 @@ public static class ComplexRead6Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead6Result rowResult = new ComplexRead6Result( @@ -299,6 +350,8 @@ public void executeOperation(ComplexRead6 cr6, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr6); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -307,11 +360,14 @@ public static class ComplexRead7Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead7Result rowResult = new ComplexRead7Result( @@ -332,6 +388,8 @@ public void executeOperation(ComplexRead7 cr7, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr7); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -340,11 +398,14 @@ public static class ComplexRead8Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead8Result rowResult = new ComplexRead8Result( @@ -365,6 +426,8 @@ public void executeOperation(ComplexRead8 cr8, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr8); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -373,12 +436,15 @@ public static class ComplexRead9Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead9Result rowResult = new ComplexRead9Result( @@ -399,6 +465,8 @@ public void executeOperation(ComplexRead9 cr9, TigerGraphDbConnectionState dbCon resultReporter.report(results.size(), results, cr9); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -407,19 +475,22 @@ public static class ComplexRead10Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead10Result rowResult = new ComplexRead10Result( @@ -430,6 +501,8 @@ public void executeOperation(ComplexRead10 cr10, TigerGraphDbConnectionState dbC resultReporter.report(results.size(), results, cr10); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -438,12 +511,15 @@ public static class ComplexRead11Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead11Result rowResult = new ComplexRead11Result( @@ -463,6 +539,8 @@ public void executeOperation(ComplexRead11 cr11, TigerGraphDbConnectionState dbC resultReporter.report(results.size(), results, cr11); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally 
{ + closeResources(conn, pstmt, rs); } } } @@ -471,12 +549,15 @@ public static class ComplexRead12Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { ComplexRead12Result rowResult = new ComplexRead12Result( @@ -496,6 +577,8 @@ public void executeOperation(ComplexRead12 cr12, TigerGraphDbConnectionState dbC resultReporter.report(results.size(), results, cr12); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -504,14 +587,17 @@ public static class SimpleRead1Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { SimpleRead1Result rowResult = new SimpleRead1Result( @@ -523,6 +609,8 @@ public void executeOperation(SimpleRead1 sr1, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr1); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -531,10 +619,13 @@ public static class SimpleRead2Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { SimpleRead2Result rowResult = new SimpleRead2Result( @@ -558,6 +649,8 @@ public void executeOperation(SimpleRead2 sr2, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr2); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -566,10 +659,13 @@ public static class SimpleRead3Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { SimpleRead3Result rowResult = new SimpleRead3Result( @@ -589,6 +685,8 @@ public void executeOperation(SimpleRead3 sr3, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr3); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -597,10 +695,13 @@ public static class SimpleRead4Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { if (rs.getObject("@@result") == null) { @@ -624,6 +725,8 @@ public void executeOperation(SimpleRead4 sr4, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr4); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -632,10 +735,13 @@ public static class SimpleRead5Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { if (rs.getObject("Nodes") == null) { @@ -659,6 +765,8 @@ public void executeOperation(SimpleRead5 sr5, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr5); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -667,10 +775,13 @@ public static class SimpleRead6Handler implements OperationHandler results = new ArrayList<>(); while (rs.next()) { if (rs.getObject("Nodes") == null) { @@ -691,6 +802,8 @@ public void executeOperation(SimpleRead6 sr6, TigerGraphDbConnectionState dbConn resultReporter.report(results.size(), results, sr6); } catch (SQLException e) { logger.error("Failed to createStatement: " + e); + } finally { + closeResources(conn, pstmt, rs); } } } @@ -699,10 +812,12 @@ public static class Write1Handler implements OperationHandler Date: Wed, 20 Sep 2023 16:48:14 +0800 Subject: [PATCH 6/9] fix typo in tcr1 --- tigergraph/queries/tcr1.gsql | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/tigergraph/queries/tcr1.gsql b/tigergraph/queries/tcr1.gsql index e7f6104..e2e306b 100644 --- a/tigergraph/queries/tcr1.gsql +++ b/tigergraph/queries/tcr1.gsql @@ -37,7 +37,7 @@ CREATE or REPLACE QUERY tcr1( s.@result_set.clear() ; - i = i + 1 + i = i + 1; END; From b467b14896fd7ff3d8efddf40f72ed38120581d0 Mon Sep 17 00:00:00 2001 From: Yongchao Liu <371063912@qq.com> Date: Wed, 20 Sep 2023 17:00:52 +0800 Subject: [PATCH 7/9] fix tcr1 typo, should use graph --- tigergraph/queries/tcr1.gsql | 1 + 1 file changed, 1 insertion(+) diff --git a/tigergraph/queries/tcr1.gsql b/tigergraph/queries/tcr1.gsql index e2e306b..82e4c8f 100644 --- a/tigergraph/queries/tcr1.gsql +++ b/tigergraph/queries/tcr1.gsql @@ -1,3 +1,4 @@ +USE GRAPH ldbc_fin CREATE or REPLACE QUERY tcr1( VERTEX id, UINT startTime, From 5f3082c367bd728389fc49cc664059165b5e57b3 Mon Sep 17 00:00:00 2001 From: Yongchao Liu <371063912@qq.com> Date: Wed, 20 Sep 2023 17:08:47 +0800 Subject: [PATCH 8/9] fix tcr1 typo again --- tigergraph/queries/tcr1.gsql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tigergraph/queries/tcr1.gsql b/tigergraph/queries/tcr1.gsql index 82e4c8f..025ea2e 100644 --- a/tigergraph/queries/tcr1.gsql +++ b/tigergraph/queries/tcr1.gsql @@ -20,7 +20,7 @@ CREATE or REPLACE QUERY tcr1( WHILE StepNodes.size() > 0 and i <= 3 DO StepNodes = - SELECT + SELECT t FROM StepNodes:s - (transfer:e) -> Account:t WHERE e.timestamp > startTime and e.timestamp < endTime and e.timestamp > s.@min_times[i-1] From f75c8586bb47c6ea7db5ce4c6ae16e6cd4bb3b49 Mon Sep 17 00:00:00 2001 From: Yiping Xiong Date: Mon, 25 Sep 2023 11:36:38 +0800 Subject: [PATCH 9/9] drop redundant udf --- tigergraph/scripts/ExprFunctions.hpp | 29 ---------------------------- tigergraph/scripts/setup_schema.sh | 4 ++-- 2 files changed, 2 insertions(+), 31 deletions(-) diff --git a/tigergraph/scripts/ExprFunctions.hpp b/tigergraph/scripts/ExprFunctions.hpp index 1723dbd..e0863e4 100644 --- a/tigergraph/scripts/ExprFunctions.hpp +++ b/tigergraph/scripts/ExprFunctions.hpp @@ -62,35 +62,6 @@ namespace UDIMPL { using namespace std; - inline uint64_t convert_time_to_uint(string str_time) { - // Convert to epoch second - tm tm_struct; - istringstream ss(str_time); - ss >> get_time(&tm_struct, "%Y-%m-%dT%H:%M:%S"); - time_t timestamp = mktime(&tm_struct); - uint64_t epoch = (uint64_t) timestamp; - - // Adjust for time zone - if (str_time.length() >= 28) { - string str_hour = str_time.substr(24, 2); - string str_minute = str_time.substr(26, 2); - uint64_t diff_second = stoi(str_hour) * 3600 + stoi(str_minute) * 60; - if (str_time.substr(23, 1) == "-") { - epoch = epoch + diff_second; - } else { - epoch = epoch - diff_second; - } - } - - // Add millisecond - uint64_t ms = 0; - if (str_time.length() >= 23) { - string str_ms = str_time.substr(20, 3); - ms = stoi(str_ms); - } - return epoch * 1000 + ms; - } - inline bool circle_and_len_check(string str_path, string key, uint path_len, string delimiter) { size_t pos_start = 0, pos_end, delim_len = delimiter.length(); string token; diff --git a/tigergraph/scripts/setup_schema.sh b/tigergraph/scripts/setup_schema.sh index d67f210..28c1883 100644 --- a/tigergraph/scripts/setup_schema.sh +++ b/tigergraph/scripts/setup_schema.sh @@ -41,8 +41,8 @@ CREATE DIRECTED EDGE signIn(From Medium, To Account, DISCRIMINATOR(timestamp UIN CREATE GRAPH ldbc_fin(*) ' -gsql 'PUT ExprUtil FROM "./ExprFunctions.hpp"' -gsql 'PUT ExprFunctions FROM "./TokenBank.cpp"' +gsql 'PUT ExprFunctions FROM 
"./ExprFunctions.hpp"' +gsql 'PUT TokenBank FROM "./TokenBank.cpp"' gsql -g ldbc_fin ' CREATE LOADING JOB load_fin_snapshot FOR GRAPH ldbc_fin{