diff --git a/nifi-assembly/NOTICE b/nifi-assembly/NOTICE
index 8f85ffe722e0..333de6fbb4a6 100644
--- a/nifi-assembly/NOTICE
+++ b/nifi-assembly/NOTICE
@@ -2005,6 +2005,11 @@ The following binary components are provided under the Apache Software License v
the terms of a BSD style license.
The original software and related information is available
at http://www.jcraft.com/jsch/.
+
+ (ASLv2) DataStax Java Driver for Apache Cassandra - Core
+ The following NOTICE information applies:
+ DataStax Java Driver for Apache Cassandra - Core
+ Copyright (C) 2012-2017 DataStax Inc.
(ASLv2) bytebuffer-collections
The following NOTICE information applies:
bytebuffer-collections
diff --git a/nifi-assembly/pom.xml b/nifi-assembly/pom.xml
index 43c394b1a3d4..4c61f2d5dc5b 100644
--- a/nifi-assembly/pom.xml
+++ b/nifi-assembly/pom.xml
@@ -519,6 +519,24 @@ language governing permissions and limitations under the License. -->
2.5.0-SNAPSHOT
nar
+
+ org.apache.nifi
+ nifi-cql-nar
+ 2.5.0-SNAPSHOT
+ nar
+
+
+ org.apache.nifi
+ nifi-cql-services-api-nar
+ 2.5.0-SNAPSHOT
+ nar
+
+
+ org.apache.nifi
+ nifi-cassandra-session-provider-service-nar
+ 2.5.0-SNAPSHOT
+ nar
+
org.apache.nifi
nifi-registry-nar
diff --git a/nifi-code-coverage/pom.xml b/nifi-code-coverage/pom.xml
index 8af4c50994e3..4004d489a348 100644
--- a/nifi-code-coverage/pom.xml
+++ b/nifi-code-coverage/pom.xml
@@ -720,6 +720,21 @@
nifi-box-services-api
2.5.0-SNAPSHOT
+
+ org.apache.nifi
+ nifi-cql-processors
+ 2.5.0-SNAPSHOT
+
+
+ org.apache.nifi
+ nifi-cassandra-session-provider-service
+ 2.5.0-SNAPSHOT
+
+
+ org.apache.nifi
+ nifi-cql-services-api
+ 2.5.0-SNAPSHOT
+
org.apache.nifi
nifi-cdc-api
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/pom.xml
new file mode 100644
index 000000000000..7dbd12958232
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/pom.xml
@@ -0,0 +1,51 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-cassandra-session-provider-service-nar
+ nar
+
+
+
+
+
+ com.google.guava
+ guava
+ provided
+
+
+
+
+
+
+ org.apache.nifi
+ nifi-cql-services-api-nar
+ 2.5.0-SNAPSHOT
+ nar
+
+
+ org.apache.nifi
+ nifi-cassandra-session-provider-service
+ 2.5.0-SNAPSHOT
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/LICENSE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 000000000000..c567ce25dfd7
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,352 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for the these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+This product bundles 'libffi' which is available under an MIT style license.
+ libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
+ see https://github.com/java-native-access/jna/blob/master/native/libffi/LICENSE
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+This product bundles 'asm' which is available under a 3-Clause BSD style license.
+For details see http://asm.ow2.org/asmdex-license.html
+
+ Copyright (c) 2012 France Télécom
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+ The binary distribution of this product bundles 'Bouncy Castle JDK 1.5'
+ under an MIT style license.
+
+ Copyright (c) 2000 - 2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+The binary distribution of this product bundles 'JNR x86asm' under an MIT
+style license.
+
+ Copyright (C) 2010 Wayne Meissner
+ Copyright (c) 2008-2009, Petr Kobalicek
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+This product bundles 'logback' which is dual-licensed under the EPL v1.0
+and the LGPL 2.1.
+
+ Logback: the reliable, generic, fast and flexible logging framework.
+
+ Copyright (C) 1999-2017, QOS.ch. All rights reserved.
+
+ This program and the accompanying materials are dual-licensed under
+ either the terms of the Eclipse Public License v1.0 as published by
+ the Eclipse Foundation or (per the licensee's choosing) under the
+ terms of the GNU Lesser General Public License version 2.1 as
+ published by the Free Software Foundation.
+
+The binary distribution of this product bundles 'ANTLR 3' which is available
+under a "3-clause BSD" license. For details see http://www.antlr.org/license.html
+
+ Copyright (c) 2012 Terence Parr and Sam Harwell
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without modification, are permitted
+ provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of
+ conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of
+ conditions and the following disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ Neither the name of the author nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/NOTICE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/NOTICE
new file mode 100644
index 000000000000..f2261add8685
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service-nar/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,292 @@
+nifi-cassandra-session-provider-service-nar
+Copyright 2016-2025 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+******************
+Apache Software License v2
+******************
+
+The following binary components are provided under the Apache Software License v2
+
+ (ASLv2) DataStax Java Driver for Apache Cassandra - Core
+ The following NOTICE information applies:
+ DataStax Java Driver for Apache Cassandra - Core
+ Copyright (C) 2012-2017 DataStax Inc.
+
+ (ASLv2) Jackson JSON processor
+ The following NOTICE information applies:
+ # Jackson JSON processor
+
+ Jackson is a high-performance, Free/Open Source JSON processing library.
+ It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+ been in development since 2007.
+ It is currently developed by a community of developers, as well as supported
+ commercially by FasterXML.com.
+
+ ## Licensing
+
+ Jackson core and extension components may licensed under different licenses.
+ To find the details that apply to this artifact see the accompanying LICENSE file.
+ For more information, including possible other licensing options, contact
+ FasterXML.com (http://fasterxml.com).
+
+ ## Credits
+
+ A list of contributors may be found from CREDITS file, which is included
+ in some artifacts (usually source distributions); but is always available
+ from the source code management (SCM) system project uses.
+
+ (ASLv2) Apache Commons Codec
+ The following NOTICE information applies:
+ Apache Commons Codec
+ Copyright 2002-2014 The Apache Software Foundation
+
+ src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
+ contains test data from http://aspell.net/test/orig/batch0.tab.
+ Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+
+ ===============================================================================
+
+ The content of package org.apache.commons.codec.language.bm has been translated
+ from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+ with permission from the original authors.
+ Original source copyright:
+ Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
+
+ (ASLv2) Apache Commons Lang
+ The following NOTICE information applies:
+ Apache Commons Lang
+ Copyright 2001-2017 The Apache Software Foundation
+
+ This product includes software from the Spring Framework,
+ under the Apache License 2.0 (see: StringUtils.containsWhitespace())
+
+ (ASLv2) Guava
+ The following NOTICE information applies:
+ Guava
+ Copyright 2015 The Guava Authors
+
+ (ASLv2) JSON-SMART
+ The following NOTICE information applies:
+ Copyright 2011 JSON-SMART authors
+
+ (ASLv2) Dropwizard Metrics
+ The following NOTICE information applies:
+ Copyright (c) 2010-2013 Coda Hale, Yammer.com
+
+ This product includes software developed by Coda Hale and Yammer, Inc.
+
+ This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
+ LongAdder), which was released with the following comments:
+
+ Written by Doug Lea with assistance from members of JCP JSR-166
+ Expert Group and released to the public domain, as explained at
+ http://creativecommons.org/publicdomain/zero/1.0/
+
+ (ASLv2) The Netty Project
+ The following NOTICE information applies:
+ Copyright 2014 The Netty Project
+ -------------------------------------------------------------------------------
+ This product contains the extensions to Java Collections Framework which has
+ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+ * LICENSE:
+ * license/LICENSE.jsr166y.txt (Public Domain)
+ * HOMEPAGE:
+ * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+ This product contains a modified version of Robert Harder's Public Domain
+ Base64 Encoder and Decoder, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.base64.txt (Public Domain)
+ * HOMEPAGE:
+ * http://iharder.sourceforge.net/current/java/base64/
+
+ This product contains a modified portion of 'Webbit', an event based
+ WebSocket and HTTP server, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.webbit.txt (BSD License)
+ * HOMEPAGE:
+ * https://github.com/joewalnes/webbit
+
+ This product contains a modified portion of 'SLF4J', a simple logging
+ facade for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.slf4j.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.slf4j.org/
+
+ This product contains a modified portion of 'Apache Harmony', an open source
+ Java SE, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.harmony.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://archive.apache.org/dist/harmony/
+
+ This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+ and decompression library written by Matthew J. Francis. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jbzip2.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jbzip2/
+
+ This product contains a modified portion of 'libdivsufsort', a C API library to construct
+ the suffix array and the Burrows-Wheeler transformed string for any input string of
+ a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.libdivsufsort.txt (MIT License)
+ * HOMEPAGE:
+ * https://github.com/y-256/libdivsufsort
+
+ This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jctools.txt (ASL2 License)
+ * HOMEPAGE:
+ * https://github.com/JCTools/JCTools
+
+ This product optionally depends on 'JZlib', a re-implementation of zlib in
+ pure Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jzlib.txt (BSD style License)
+ * HOMEPAGE:
+ * http://www.jcraft.com/jzlib/
+
+ This product optionally depends on 'Compress-LZF', a Java library for encoding and
+ decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/ning/compress
+
+ This product optionally depends on 'lz4', a LZ4 Java compression
+ and decompression library written by Adrien Grand. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lz4.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jpountz/lz4-java
+
+ This product optionally depends on 'lzma-java', a LZMA Java compression
+ and decompression library, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lzma-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jponge/lzma-java
+
+ This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+ and decompression library written by William Kinney. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jfastlz.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jfastlz/
+
+ This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+ interchange format, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.protobuf.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/protobuf
+
+ This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+ a temporary self-signed X.509 certificate when the JVM does not provide the
+ equivalent functionality. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.bouncycastle.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.bouncycastle.org/
+
+ This product optionally depends on 'Snappy', a compression library produced
+ by Google Inc, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.snappy.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/snappy
+
+ This product optionally depends on 'JBoss Marshalling', an alternative Java
+ serialization API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
+ * HOMEPAGE:
+ * http://www.jboss.org/jbossmarshalling
+
+ This product optionally depends on 'Caliper', Google's micro-
+ benchmarking framework, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.caliper.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/google/caliper
+
+ This product optionally depends on 'Apache Log4J', a logging framework, which
+ can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.log4j.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://logging.apache.org/log4j/
+
+ This product optionally depends on 'Aalto XML', an ultra-high performance
+ non-blocking XML processor, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://wiki.fasterxml.com/AaltoHome
+
+ This product contains a modified version of 'HPACK', a Java implementation of
+ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.hpack.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/twitter/hpack
+
+ This product contains a modified portion of 'Apache Commons Lang', a Java library
+ provides utilities for the java.lang API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.commons-lang.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://commons.apache.org/proper/commons-lang/
+
+ This product contains a forked and modified version of Tomcat Native
+
+ * LICENSE:
+ * ASL2
+ * HOMEPAGE:
+ * http://tomcat.apache.org/native-doc/
+ * https://svn.apache.org/repos/asf/tomcat/native/
+
+ (ASLv2) Objenesis
+ The following NOTICE information applies:
+ Objenesis
+ Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita
+
+************************
+Eclipse Public License 1.0
+************************
+
+The following binary components are provided under the Eclipse Public License 1.0. See project link for details.
+
+ (EPL 2.0)(GPL 2)(LGPL 2.1) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
+ (EPL 1.0)(LGPL 2.1) Logback Classic (ch.qos.logback:logback-classic:jar:1.2.6 - http://logback.qos.ch/)
+ (EPL 1.0)(LGPL 2.1) Logback Core (ch.qos.logback:logback-core:jar:1.2.6 - http://logback.qos.ch/)
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/pom.xml
new file mode 100644
index 000000000000..db32c890fc71
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/pom.xml
@@ -0,0 +1,101 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-cassandra-session-provider-service
+ jar
+
+
+ 4.19.0
+
+
+
+
+ org.apache.nifi
+ nifi-api
+
+
+ org.apache.nifi
+ nifi-utils
+
+
+ org.apache.nifi
+ nifi-cql-services-api
+ 2.5.0-SNAPSHOT
+ provided
+
+
+
+ org.apache.cassandra
+ java-driver-core
+ ${driver.version}
+
+
+
+ org.apache.nifi
+ nifi-ssl-context-service-api
+
+
+ org.apache.nifi
+ nifi-framework-api
+
+
+ org.apache.nifi
+ nifi-mock
+ test
+
+
+
+ org.apache.avro
+ avro
+
+
+
+ org.testcontainers
+ cassandra
+ 1.20.5
+ test
+
+
+ org.testcontainers
+ junit-jupiter
+ test
+
+
+ org.apache.nifi
+ nifi-security-cert-builder
+ 2.5.0-SNAPSHOT
+ test
+
+
+ org.apache.nifi
+ nifi-record
+ compile
+
+
+ org.apache.nifi
+ nifi-avro-record-utils
+ 2.5.0-SNAPSHOT
+ compile
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionService.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionService.java
new file mode 100644
index 000000000000..e3aa03883402
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionService.java
@@ -0,0 +1,633 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.cassandra;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.CqlSessionBuilder;
+import com.datastax.oss.driver.api.core.config.DefaultDriverOption;
+import com.datastax.oss.driver.api.core.config.DriverConfigLoader;
+import com.datastax.oss.driver.api.core.cql.BatchStatement;
+import com.datastax.oss.driver.api.core.cql.BatchStatementBuilder;
+import com.datastax.oss.driver.api.core.cql.BatchType;
+import com.datastax.oss.driver.api.core.cql.BoundStatement;
+import com.datastax.oss.driver.api.core.cql.ColumnDefinitions;
+import com.datastax.oss.driver.api.core.cql.PreparedStatement;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.Row;
+import com.datastax.oss.driver.api.core.cql.SimpleStatement;
+import com.datastax.oss.driver.api.core.cql.Statement;
+import com.datastax.oss.driver.api.core.servererrors.QueryExecutionException;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.ListType;
+import com.datastax.oss.driver.api.core.type.MapType;
+import com.datastax.oss.driver.api.core.type.SetType;
+import com.datastax.oss.driver.api.core.type.codec.registry.MutableCodecRegistry;
+import com.datastax.oss.driver.api.querybuilder.QueryBuilder;
+import com.datastax.oss.driver.api.querybuilder.delete.DeleteSelection;
+import com.datastax.oss.driver.api.querybuilder.insert.InsertInto;
+import com.datastax.oss.driver.api.querybuilder.insert.RegularInsert;
+import com.datastax.oss.driver.api.querybuilder.relation.Relation;
+import com.datastax.oss.driver.api.querybuilder.update.Update;
+import com.datastax.oss.driver.api.querybuilder.update.UpdateStart;
+import com.datastax.oss.driver.api.querybuilder.update.UpdateWithAssignments;
+import org.apache.avro.Schema;
+import org.apache.avro.SchemaBuilder;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.nifi.annotation.documentation.CapabilityDescription;
+import org.apache.nifi.annotation.documentation.Tags;
+import org.apache.nifi.annotation.lifecycle.OnDisabled;
+import org.apache.nifi.annotation.lifecycle.OnEnabled;
+import org.apache.nifi.avro.AvroTypeUtil;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.components.PropertyValue;
+import org.apache.nifi.controller.AbstractControllerService;
+import org.apache.nifi.controller.ConfigurationContext;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.serialization.record.MapRecord;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.serialization.record.type.ArrayDataType;
+import org.apache.nifi.service.cassandra.mapping.FlexibleCounterCodec;
+import org.apache.nifi.service.cassandra.mapping.JavaSQLTimestampCodec;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.CQLFieldInfo;
+import org.apache.nifi.service.cql.api.CQLQueryCallback;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+import org.apache.nifi.service.cql.api.exception.QueryFailureException;
+import org.apache.nifi.ssl.SSLContextService;
+
+import javax.net.ssl.SSLContext;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.datastax.oss.driver.api.core.type.DataTypes.ASCII;
+
@Tags({"cassandra", "dbcp", "database", "connection", "pooling"})
@CapabilityDescription("Provides connection session for Cassandra processors to work with Apache Cassandra.")
public class CassandraCQLExecutionService extends AbstractControllerService implements CQLExecutionService {

    // Default CQL native-protocol port, used when a contact point omits an explicit port.
    public static final int DEFAULT_CASSANDRA_PORT = 9042;

    // Common descriptors

    // Live session created by onEnabled()/connectToCassandra() and torn down in onDisabled();
    // null while the service is disabled.
    private CqlSession cassandraSession;

    // NOTE(review): never initialized or populated anywhere in this class (see the TODO in
    // query()) — confirm whether prepared-statement caching is still planned.
    private Map statementCache;

    // Default keyspace from the KEYSPACE property; null when that property is unset.
    private String keyspace;
    // Driver page size applied to statements built in query(), from the FETCH_SIZE property.
    private int pageSize;

    // NOTE(review): generic type parameters appear to have been stripped from this file
    // (presumably List<PropertyDescriptor>); the descriptor constants referenced below are
    // not declared in this class and are presumably inherited from CQLExecutionService.
    public static final List PROPERTY_DESCRIPTORS = List.of(
            CONTACT_POINTS,
            CLIENT_AUTH,
            DATACENTER,
            KEYSPACE,
            USERNAME,
            PASSWORD,
            PROP_SSL_CONTEXT_SERVICE,
            FETCH_SIZE,
            READ_TIMEOUT,
            CONNECT_TIMEOUT,
            CONSISTENCY_LEVEL,
            COMPRESSION_TYPE
    );

    @Override
    public List getSupportedPropertyDescriptors() {
        return PROPERTY_DESCRIPTORS;
    }
+
    /**
     * Establishes the shared Cassandra session when the controller service is enabled.
     *
     * @param context configuration context supplying the connection properties
     */
    @OnEnabled
    public void onEnabled(final ConfigurationContext context) {
        connectToCassandra(context);
    }
+
+ @OnDisabled
+ public void onDisabled() {
+ if (cassandraSession != null) {
+ cassandraSession.close();
+ cassandraSession = null;
+ }
+ }
+
    /**
     * Builds the shared {@link CqlSession} from the service configuration and stores it in
     * {@code cassandraSession}. No-op when a session already exists.
     *
     * @param context configuration context supplying contact points, credentials, SSL, timeouts,
     *                consistency level, compression, datacenter and keyspace settings
     */
    private void connectToCassandra(ConfigurationContext context) {
        if (cassandraSession == null) {
            // Captured once here; applied to every statement built in query().
            this.pageSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();

            final String consistencyLevel = context.getProperty(CONSISTENCY_LEVEL).getValue();
            final String compression = context.getProperty(COMPRESSION_TYPE).getValue();
            final String contactPointList = context.getProperty(CONTACT_POINTS).evaluateAttributeExpressions().getValue();

            List contactPoints = getContactPoints(contactPointList);

            // Set up the client for secure (SSL/TLS communications) if configured to do so
            final SSLContextService sslService =
                    context.getProperty(PROP_SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
            final SSLContext sslContext;

            if (sslService == null) {
                sslContext = null;
            } else {
                sslContext = sslService.createContext();
            }

            final String username, password;
            PropertyValue usernameProperty = context.getProperty(USERNAME).evaluateAttributeExpressions();
            PropertyValue passwordProperty = context.getProperty(PASSWORD).evaluateAttributeExpressions();

            // NOTE(review): this null check looks always-true for NiFi PropertyValues; the
            // effective "credentials present" test is the isNotBlank check further below — confirm.
            if (usernameProperty != null && passwordProperty != null) {
                username = usernameProperty.getValue();
                password = passwordProperty.getValue();
            } else {
                username = null;
                password = null;
            }

            final Duration readTimeout = context.getProperty(READ_TIMEOUT).evaluateAttributeExpressions().asDuration();
            final Duration connectTimeout = context.getProperty(CONNECT_TIMEOUT).evaluateAttributeExpressions().asDuration();

            final String datacenter = context.getProperty(DATACENTER).evaluateAttributeExpressions().getValue();

            keyspace = context.getProperty(KEYSPACE).isSet() ? context.getProperty(KEYSPACE).evaluateAttributeExpressions().getValue() : null;

            // Driver-level options: connect/request timeouts, consistency and transport compression.
            DriverConfigLoader loader =
                    DriverConfigLoader.programmaticBuilder()
                            .withDuration(DefaultDriverOption.CONNECTION_CONNECT_TIMEOUT, connectTimeout)
                            .withDuration(DefaultDriverOption.REQUEST_TIMEOUT, readTimeout)
                            .withString(DefaultDriverOption.REQUEST_CONSISTENCY, consistencyLevel)
                            .withString(DefaultDriverOption.PROTOCOL_COMPRESSION, compression)
                            .build();

            CqlSessionBuilder builder = CqlSession.builder()
                    .addContactPoints(contactPoints);
            if (StringUtils.isNotBlank(username) && StringUtils.isNotBlank(password)) {
                builder = builder.withAuthCredentials(username, password);
            }

            final CqlSession cqlSession = builder
                    .withSslContext(sslContext)
                    .withLocalDatacenter(datacenter)
                    .withKeyspace(keyspace)
                    .withConfigLoader(loader)
                    .build();

            // Register custom codecs so java.sql.Timestamp and arbitrary Number values map onto
            // CQL timestamp and counter columns (see JavaSQLTimestampCodec / FlexibleCounterCodec).
            MutableCodecRegistry codecRegistry =
                    (MutableCodecRegistry) cqlSession.getContext().getCodecRegistry();

            codecRegistry.register(new JavaSQLTimestampCodec());
            codecRegistry.register(new FlexibleCounterCodec());

            // Create the cluster and connect to it
            cassandraSession = cqlSession;
        }
    }
+
+ private List getContactPoints(String contactPointList) {
+
+ if (contactPointList == null) {
+ return null;
+ }
+
+ final String[] contactPointStringList = contactPointList.split(",");
+ List contactPoints = new ArrayList<>();
+
+ for (String contactPointEntry : contactPointStringList) {
+ String[] addresses = contactPointEntry.split(":");
+ final String hostName = addresses[0].trim();
+ final int port = (addresses.length > 1) ? Integer.parseInt(addresses[1].trim()) : DEFAULT_CASSANDRA_PORT;
+
+ contactPoints.add(new InetSocketAddress(hostName, port));
+ }
+
+ return contactPoints;
+ }
+
+ @Override
+ public void query(String cql, boolean cacheStatement, List parameters, CQLQueryCallback callback) throws QueryFailureException {
+ SimpleStatement statement = SimpleStatement.builder(cql)
+ .setPageSize(pageSize).build();
+ PreparedStatement preparedStatement = cassandraSession.prepare(statement);
+
+
+ //TODO: cache
+ BoundStatement boundStatement = parameters != null && !parameters.isEmpty()
+ ? preparedStatement.bind(parameters.toArray()) : preparedStatement.bind();
+ ResultSet results = cassandraSession.execute(boundStatement);
+
+ Iterator resultsIterator = results.iterator();
+ long rowNumber = 0;
+
+ List columnDefinitions = new ArrayList<>();
+ AtomicReference schemaReference = new AtomicReference<>();
+
+ try {
+ while (resultsIterator.hasNext()) {
+ try {
+ Row row = resultsIterator.next();
+
+ if (schemaReference.get() == null) {
+ Schema generatedAvroSchema = createSchema(results);
+ RecordSchema converted = AvroTypeUtil.createSchema(generatedAvroSchema);
+ schemaReference.set(converted);
+ }
+
+ if (columnDefinitions.isEmpty()) {
+ row.getColumnDefinitions().forEach(def -> {
+ CQLFieldInfo info = new CQLFieldInfo(def.getName().toString(),
+ def.getType().toString(), def.getType().getProtocolCode());
+ columnDefinitions.add(info);
+ });
+ }
+
+ Map resultMap = new HashMap<>();
+
+ for (int x = 0; x < columnDefinitions.size(); x++) {
+ resultMap.put(columnDefinitions.get(x).getFieldName(), row.getObject(x));
+ }
+
+ MapRecord record = new MapRecord(schemaReference.get(), resultMap);
+
+ callback.receive(++rowNumber, record, columnDefinitions, resultsIterator.hasNext());
+ } catch (Exception ex) {
+ throw new ProcessException("Error querying CQL", ex);
+ }
+ }
+ } catch (QueryExecutionException qee) {
+ getLogger().error("Error executing query", qee);
+ throw new QueryFailureException();
+ }
+ }
+
+ protected GeneratedResult generateInsert(String cassandraTable, RecordSchema schema, Map recordContentMap) {
+ InsertInto insertQuery;
+ List keys = new ArrayList<>();
+
+ if (cassandraTable.contains(".")) {
+ String[] keyspaceAndTable = cassandraTable.split("\\.");
+ insertQuery = QueryBuilder.insertInto(keyspaceAndTable[0], keyspaceAndTable[1]);
+ } else {
+ insertQuery = QueryBuilder.insertInto(cassandraTable);
+ }
+
+ RegularInsert regularInsert = null;
+ for (String fieldName : schema.getFieldNames()) {
+ Object value = recordContentMap.get(fieldName);
+
+ if (value != null && value.getClass().isArray()) {
+ Object[] array = (Object[]) value;
+
+ if (array.length > 0) {
+ if (array[0] instanceof Byte) {
+ Object[] temp = (Object[]) value;
+ byte[] newArray = new byte[temp.length];
+ for (int x = 0; x < temp.length; x++) {
+ newArray[x] = (Byte) temp[x];
+ }
+ value = ByteBuffer.wrap(newArray);
+ }
+ }
+ }
+
+ if (schema.getDataType(fieldName).isPresent()) {
+ org.apache.nifi.serialization.record.DataType fieldDataType = schema.getDataType(fieldName).get();
+ if (fieldDataType.getFieldType() == RecordFieldType.ARRAY) {
+ if (((ArrayDataType) fieldDataType).getElementType().getFieldType() == RecordFieldType.STRING) {
+ value = Arrays.stream((Object[]) value).toArray(String[]::new);
+ }
+ }
+ }
+
+ if (regularInsert == null) {
+ regularInsert = insertQuery.value(fieldName, QueryBuilder.bindMarker(fieldName));
+ } else {
+ regularInsert = regularInsert.value(fieldName, QueryBuilder.bindMarker(fieldName));
+ }
+
+ keys.add(fieldName);
+ }
+
+ if (regularInsert == null) {
+ throw new ProcessException("Could not build an insert statement from the supplied record");
+ }
+
+ return new GeneratedResult(regularInsert.build(), keys);
+ }
+
+ @Override
+ public void insert(String table, org.apache.nifi.serialization.record.Record record) {
+ GeneratedResult result = generateInsert(table, record.getSchema(), ((MapRecord) record).toMap(true));
+ PreparedStatement preparedStatement = cassandraSession.prepare(result.statement);
+
+ BoundStatement boundStatement = preparedStatement.bind(getBindValues(record, result.keysUsed));
+
+ cassandraSession.execute(boundStatement);
+ }
+
+ @Override
+ public void insert(String table, List records) {
+ if (records == null || records.isEmpty()) {
+ return;
+ }
+
+ BatchStatementBuilder builder = BatchStatement.builder(BatchType.LOGGED);
+ GeneratedResult result = generateInsert(table, records.get(0).getSchema(), ((MapRecord) records.get(0)).toMap(true));
+ PreparedStatement preparedStatement = cassandraSession.prepare(result.statement);
+
+ for (org.apache.nifi.serialization.record.Record record : records) {
+ builder.addStatement(preparedStatement.bind(getBindValues(record, result.keysUsed)));
+ }
+ cassandraSession.execute(builder.build());
+ }
+
+ @Override
+ public String getTransitUrl(String tableName) {
+ return "cassandra://" + cassandraSession.getMetadata().getClusterName() + "." + tableName;
+ }
+
    /**
     * Creates an Avro schema from the given result set. The metadata (column definitions, data types, etc.) is used
     * to determine a schema for Avro.
     *
     * @param rs The result set from which an Avro schema will be created
     * @return An Avro schema corresponding to the given result set's metadata
     */
    public static Schema createSchema(final ResultSet rs) {
        final ColumnDefinitions columnDefinitions = rs.getColumnDefinitions();
        final int nrOfColumns = (columnDefinitions == null ? 0 : columnDefinitions.size());
        // Fall back to a generic record name when the result metadata exposes no table name.
        String tableName = "NiFi_Cassandra_Query_Record";
        if (nrOfColumns > 0) {
            String tableNameFromMeta = columnDefinitions.get(0).getTable().toString(); //.getTable(0);
            if (!StringUtils.isBlank(tableNameFromMeta)) {
                tableName = tableNameFromMeta;
            }
        }

        final SchemaBuilder.FieldAssembler builder = SchemaBuilder.record(tableName).namespace("any.data").fields();
        if (columnDefinitions != null) {
            for (int i = 0; i < nrOfColumns; i++) {

                DataType dataType = columnDefinitions.get(i).getType();
                if (dataType == null) {
                    throw new IllegalArgumentException("No data type for column[" + i + "] with name "
                            + columnDefinitions.get(i).getName());
                }

                // CQL lists/sets become nullable Avro arrays and maps become nullable Avro maps;
                // element/value types go through the same primitive mapping as scalar columns.
                if (dataType instanceof ListType l) {
                    builder.name(columnDefinitions.get(i).getName().toString()).type().unionOf().nullBuilder().endNull().and().array()
                            .items(getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(l.getElementType()))).endUnion().noDefault();
                } else if (dataType instanceof SetType s) {
                    builder.name(columnDefinitions.get(i).getName().toString()).type().unionOf().nullBuilder().endNull().and().array()
                            .items(getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(s.getElementType()))).endUnion().noDefault();
                } else if (dataType instanceof MapType m) {
                    builder.name(columnDefinitions.get(i).getName().toString()).type().unionOf().nullBuilder().endNull().and().map().values(
                            getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(m.getValueType()))).endUnion().noDefault();
                } else {
                    // Scalar columns are emitted as a union of null and the mapped primitive type.
                    builder.name(columnDefinitions.get(i).getName().toString())
                            .type(getUnionFieldType(getPrimitiveAvroTypeFromCassandraType(dataType))).noDefault();
                }
            }
        }
        return builder.endRecord();
    }
+
    /**
     * Creates a union schema of null and the given primitive type, making the field nullable.
     *
     * @param dataType The Avro primitive type name of the field (as produced by
     *                 {@link #getPrimitiveAvroTypeFromCassandraType})
     * @return the {@code [null, type]} union schema
     */
    protected static Schema getUnionFieldType(String dataType) {
        return SchemaBuilder.builder().unionOf().nullBuilder().endNull().and().type(getSchemaForType(dataType)).endUnion();
    }
+
+ /**
+ * This method will create an Avro schema for the specified type.
+ *
+ * @param dataType The data type of the field
+ */
+ protected static Schema getSchemaForType(final String dataType) {
+ final SchemaBuilder.TypeBuilder typeBuilder = SchemaBuilder.builder();
+ final Schema returnSchema = switch (dataType) {
+ case "string" -> typeBuilder.stringType();
+ case "boolean" -> typeBuilder.booleanType();
+ case "int" -> typeBuilder.intType();
+ case "long" -> typeBuilder.longType();
+ case "float" -> typeBuilder.floatType();
+ case "double" -> typeBuilder.doubleType();
+ case "bytes" -> typeBuilder.bytesType();
+ default -> throw new IllegalArgumentException("Unknown Avro primitive type: " + dataType);
+ };
+ return returnSchema;
+ }
+
+ protected static String getPrimitiveAvroTypeFromCassandraType(final DataType dataType) {
+ // Map types from Cassandra to Avro where possible
+ if (dataType.equals(ASCII)
+ || dataType.equals(DataTypes.TEXT)
+ // Nonstandard types represented by this processor as a string
+ || dataType.equals(DataTypes.TIMESTAMP)
+ || dataType.equals(DataTypes.TIMEUUID)
+ || dataType.equals(DataTypes.UUID)
+ || dataType.equals(DataTypes.INET)
+ || dataType.equals(DataTypes.VARINT)) {
+ return "string";
+
+ } else if (dataType.equals(DataTypes.BOOLEAN)) {
+ return "boolean";
+
+ } else if (dataType.equals(DataTypes.INT)) {
+ return "int";
+
+ } else if (dataType.equals(DataTypes.BIGINT)
+ || dataType.equals(DataTypes.COUNTER)) {
+ return "long";
+
+ } else if (dataType.equals(DataTypes.FLOAT)) {
+ return "float";
+
+ } else if (dataType.equals(DataTypes.DOUBLE)) {
+ return "double";
+
+ } else if (dataType.equals(DataTypes.BLOB)) {
+ return "bytes";
+
+ } else {
+ throw new IllegalArgumentException("createSchema: Unknown Cassandra data type " + dataType
+ + " cannot be converted to Avro type");
+ }
+ }
+
+ private String[] getTableAndKeyspace(String cassandraTable) {
+ if (cassandraTable.contains(".")) {
+ return cassandraTable.split("\\.");
+ } else {
+ return new String[] {keyspace, cassandraTable};
+ }
+ }
+
+ protected SimpleStatement generateDelete(String cassandraTable, org.apache.nifi.serialization.record.Record record, List deleteKeyNames) {
+ DeleteSelection deleteSelection;
+ RecordSchema schema = record.getSchema();
+
+ // Split up the update key names separated by a comma, should not be empty
+ if (deleteKeyNames == null || deleteKeyNames.isEmpty()) {
+ throw new IllegalArgumentException("No delete keys were specified");
+ }
+
+ // Verify if all update keys are present in the record
+ for (String deleteKey : deleteKeyNames) {
+ if (!schema.getFieldNames().contains(deleteKey)) {
+ throw new IllegalArgumentException("Delete key '" + deleteKey + "' is not present in the record schema");
+ }
+ }
+
+ final String[] tableAndKeyspace = getTableAndKeyspace(cassandraTable);
+
+ deleteSelection = QueryBuilder.deleteFrom(tableAndKeyspace[0], tableAndKeyspace[1]);
+
+ List otherKeys = schema.getFieldNames().stream()
+ .filter(fieldName -> !deleteKeyNames.contains(fieldName))
+ .toList();
+
+ List whereCriteria = new ArrayList<>();
+
+ for (String fieldName : otherKeys) {
+ whereCriteria.add(Relation.column("k").isEqualTo(QueryBuilder.bindMarker(fieldName)));
+ }
+
+ return deleteSelection.where(whereCriteria).build();
+ }
+
    /**
     * Builds a parameterized UPDATE: every non-key field becomes an assignment (SET, increment
     * or decrement) and every update key becomes a WHERE equality, all via named bind markers.
     *
     * @param cassandraTable table name, optionally qualified as "keyspace.table"
     * @param record         record whose schema drives the generated columns
     * @param updateKeyNames key fields for the WHERE clause; must be non-empty and present in the schema
     * @param updateMethod   SET for plain columns; INCREMENT/DECREMENT for counter columns
     * @return the generated statement plus marker names in bind order (assignments first, then
     *         WHERE keys — mirroring their order in the generated CQL text)
     * @throws IllegalArgumentException when keys are missing/invalid or the update method is unknown
     * @throws ProcessException when the record has no non-key fields to assign
     */
    protected GeneratedResult generateUpdate(String cassandraTable, org.apache.nifi.serialization.record.Record record, List updateKeyNames, UpdateMethod updateMethod) {
        UpdateStart updateQueryStart;
        RecordSchema schema = record.getSchema();

        List keysUsedInOrder = new ArrayList<>();

        // Split up the update key names separated by a comma, should not be empty
        if (updateKeyNames == null || updateKeyNames.isEmpty()) {
            throw new IllegalArgumentException("No Update Keys were specified");
        }

        // Verify if all update keys are present in the record
        for (String updateKey : updateKeyNames) {
            if (!schema.getFieldNames().contains(updateKey)) {
                throw new IllegalArgumentException("Update key '" + updateKey + "' is not present in the record schema");
            }
        }

        // Prepare keyspace/table names
        if (cassandraTable.contains(".")) {
            String[] keyspaceAndTable = cassandraTable.split("\\.");
            updateQueryStart = QueryBuilder.update(keyspaceAndTable[0], keyspaceAndTable[1]);
        } else {
            updateQueryStart = QueryBuilder.update(cassandraTable);
        }

        UpdateWithAssignments updateAssignments = null;

        // Non-key fields receive assignments; key fields only appear in the WHERE clause below.
        List otherKeys = schema.getFieldNames().stream()
                .filter(fieldName -> !updateKeyNames.contains(fieldName))
                .toList();

        for (String fieldName : otherKeys) {
            if (updateMethod == UpdateMethod.SET) {
                updateAssignments = updateAssignments == null ? updateQueryStart.setColumn(fieldName, QueryBuilder.bindMarker(fieldName))
                        : updateAssignments.setColumn(fieldName, QueryBuilder.bindMarker(fieldName));
            } else if (updateMethod == UpdateMethod.INCREMENT) {
                updateAssignments = updateAssignments == null ? updateQueryStart.increment(fieldName, QueryBuilder.bindMarker(fieldName))
                        : updateAssignments.increment(fieldName, QueryBuilder.bindMarker(fieldName));
            } else if (updateMethod == UpdateMethod.DECREMENT) {
                updateAssignments = updateAssignments == null ? updateQueryStart.decrement(fieldName, QueryBuilder.bindMarker(fieldName))
                        : updateAssignments.decrement(fieldName, QueryBuilder.bindMarker(fieldName));
            } else {
                throw new IllegalArgumentException("Update Method '" + updateMethod + "' is not valid.");
            }

            keysUsedInOrder.add(fieldName);
        }

        if (updateAssignments == null) {
            throw new ProcessException("No update assignment found");
        }

        Update update = null;

        // WHERE keys are appended after the assignments so keysUsedInOrder mirrors the marker
        // order of the generated CQL (SET clause markers first, WHERE clause markers second).
        for (String fieldName : updateKeyNames) {
            update = update == null ? updateAssignments.whereColumn(fieldName).isEqualTo(QueryBuilder.bindMarker(fieldName))
                    : update.whereColumn(fieldName).isEqualTo(QueryBuilder.bindMarker(fieldName));
            keysUsedInOrder.add(fieldName);
        }

        return new GeneratedResult(update.build(), keysUsedInOrder);
    }
+
+ @Override
+ public void delete(String cassandraTable, org.apache.nifi.serialization.record.Record record, List updateKeys) {
+ Statement deleteStatement = generateDelete(cassandraTable, record, updateKeys);
+ cassandraSession.execute(deleteStatement);
+ }
+
+ @Override
+ public void update(String cassandraTable, org.apache.nifi.serialization.record.Record record, List updateKeys, UpdateMethod updateMethod) {
+ GeneratedResult result = generateUpdate(cassandraTable, record, updateKeys, updateMethod);
+ PreparedStatement preparedStatement = cassandraSession.prepare(result.statement());
+
+ BoundStatement statement = preparedStatement.bind(getBindValues(record, result.keysUsed()));
+
+ cassandraSession.execute(statement);
+ }
+
+ private Object[] getBindValues(org.apache.nifi.serialization.record.Record record, List keyNamesInOrder) {
+ Object[] result = new Object[keyNamesInOrder.size()];
+
+ for (int i = 0; i < keyNamesInOrder.size(); i++) {
+ result[i] = record.getValue(keyNamesInOrder.get(i));
+ }
+
+ return result;
+ }
+
+ @Override
+ public void update(String cassandraTable, List records, List updateKeys, UpdateMethod updateMethod) {
+ if (records == null || records.isEmpty()) {
+ return;
+ }
+
+ BatchStatementBuilder builder = BatchStatement.builder(BatchType.LOGGED);
+
+ GeneratedResult result = generateUpdate(cassandraTable, records.get(0), updateKeys, updateMethod);
+ PreparedStatement preparedStatement = cassandraSession.prepare(result.statement());
+
+ for (org.apache.nifi.serialization.record.Record record : records) {
+ builder.addStatement(preparedStatement.bind(getBindValues(record, result.keysUsed())));
+ }
+ cassandraSession.execute(builder.build());
+ }
+
    // Pairs a generated statement with the bind-marker names in the order values must be bound.
    record GeneratedResult(SimpleStatement statement, List keysUsed) {

    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/FlexibleCounterCodec.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/FlexibleCounterCodec.java
new file mode 100644
index 000000000000..ce8d2adab3ff
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/FlexibleCounterCodec.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cassandra.mapping;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.DataTypes;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+
+import java.nio.ByteBuffer;
+
+public class FlexibleCounterCodec implements TypeCodec {
+
+ private final TypeCodec inner = TypeCodecs.BIGINT;
+
+ @Override
+ public GenericType getJavaType() {
+ return GenericType.of(Number.class);
+ }
+
+ @Override
+ public DataType getCqlType() {
+ return DataTypes.COUNTER;
+ }
+
+ @Override
+ public ByteBuffer encode(Number value, ProtocolVersion protocolVersion) {
+ if (value == null) return null;
+ return inner.encode(value.longValue(), protocolVersion);
+ }
+
+ @Override
+ public Number decode(ByteBuffer bytes, ProtocolVersion protocolVersion) {
+ Long val = inner.decode(bytes, protocolVersion);
+ return val; // returns as Long, but you can cast if needed
+ }
+
+ @Override
+ public String format(Number value) {
+ return inner.format(value == null ? null : value.longValue());
+ }
+
+ @Override
+ public Number parse(String value) {
+ return inner.parse(value);
+ }
+}
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/JavaSQLTimestampCodec.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/JavaSQLTimestampCodec.java
new file mode 100644
index 000000000000..13221b854035
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/java/org/apache/nifi/service/cassandra/mapping/JavaSQLTimestampCodec.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cassandra.mapping;
+
+import com.datastax.oss.driver.api.core.ProtocolVersion;
+import com.datastax.oss.driver.api.core.type.DataType;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodec;
+import com.datastax.oss.driver.api.core.type.codec.TypeCodecs;
+import com.datastax.oss.driver.api.core.type.reflect.GenericType;
+
+import java.nio.ByteBuffer;
+import java.sql.Timestamp;
+import java.time.Instant;
+
+public class JavaSQLTimestampCodec implements TypeCodec {
+ private final TypeCodec instantCodec;
+
+ public JavaSQLTimestampCodec() {
+ this.instantCodec = TypeCodecs.TIMESTAMP;
+ }
+
+ @Override
+ public GenericType getJavaType() {
+ return GenericType.of(Timestamp.class);
+ }
+
+ @Override
+ public DataType getCqlType() {
+ return instantCodec.getCqlType(); // maps to CQL `timestamp`
+ }
+
+ @Override
+ public ByteBuffer encode(Timestamp value, ProtocolVersion protocolVersion) {
+ return value == null ? null : instantCodec.encode(value.toInstant(), protocolVersion);
+ }
+
+ @Override
+ public Timestamp decode(ByteBuffer bytes, ProtocolVersion protocolVersion) {
+ Instant instant = instantCodec.decode(bytes, protocolVersion);
+ return instant == null ? null : Timestamp.from(instant);
+ }
+
+ @Override
+ public String format(Timestamp value) {
+ return value == null ? "NULL" : instantCodec.format(value.toInstant());
+ }
+
+ @Override
+ public Timestamp parse(String value) {
+ Instant instant = instantCodec.parse(value);
+ return instant == null ? null : Timestamp.from(instant);
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
new file mode 100644
index 000000000000..c5b134d343f5
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.nifi.service.cassandra.CassandraCQLExecutionService
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/AbstractTestCassandraCQLExecutionService.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/AbstractTestCassandraCQLExecutionService.java
new file mode 100644
index 000000000000..7cfdb5849381
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/AbstractTestCassandraCQLExecutionService.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cassandra;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.Row;
+import org.apache.nifi.security.cert.builder.StandardCertificateBuilder;
+import org.apache.nifi.serialization.SimpleRecordSchema;
+import org.apache.nifi.serialization.record.MapRecord;
+import org.apache.nifi.serialization.record.RecordField;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.service.cassandra.mock.MockCassandraProcessor;
+import org.apache.nifi.service.cql.api.CQLQueryCallback;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.testcontainers.cassandra.CassandraContainer;
+import org.testcontainers.images.builder.Transferable;
+import org.testcontainers.junit.jupiter.Testcontainers;
+
+import javax.security.auth.x500.X500Principal;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.Key;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
+@Testcontainers
+public abstract class AbstractTestCassandraCQLExecutionService {
+    public static final String adminPassword = UUID.randomUUID().toString();
+
+    private static TestRunner runner;
+    private static CassandraCQLExecutionService sessionProvider;
+
+    private static final Map<String, String> CONTAINER_ENVIRONMENT = new LinkedHashMap<>();
+
+    private static final Base64.Encoder ENCODER = Base64.getEncoder();
+
+    private static final X500Principal CERTIFICATE_ISSUER = new X500Principal("CN=localhost");
+
+    private static final Collection<String> DNS_NAMES = Collections.singleton("localhost");
+
+    private static final String CERTIFICATE_FORMAT = "-----BEGIN CERTIFICATE-----%n%s%n-----END CERTIFICATE-----";
+
+    private static final String KEY_FORMAT = "-----BEGIN PRIVATE KEY-----%n%s%n-----END PRIVATE KEY-----";
+
+    private static final String SSL_DIRECTORY = "/ssl";
+
+    private static final String CERTIFICATE_FILE = "public.crt";
+
+    private static final String CONTAINER_CERTIFICATE_PATH = String.format("%s/%s", SSL_DIRECTORY, CERTIFICATE_FILE);
+
+    private static final String KEY_FILE = "private.key";
+
+    private static final String CONTAINER_KEY_PATH = String.format("%s/%s", SSL_DIRECTORY, KEY_FILE);
+
+    public static CassandraContainer container;
+
+    public static final String KEYSPACE = "testspace";
+
+    static CqlSession session;
+
+    public static void setup(String cassandraDockerString) throws Exception {
+        container = new CassandraContainer(cassandraDockerString)
+                .withInitScript("init.cql");
+
+        setCertificatePrivateKey();
+
+        container.withEnv(CONTAINER_ENVIRONMENT);
+        container.withExposedPorts(9042);
+        container.start();
+
+        MockCassandraProcessor mockCassandraProcessor = new MockCassandraProcessor();
+        sessionProvider = new CassandraCQLExecutionService();
+
+        final String contactPoint = container.getHost() + ":" + container.getMappedPort(9042);
+
+        runner = TestRunners.newTestRunner(mockCassandraProcessor);
+        runner.addControllerService("cassandra-session-provider", sessionProvider);
+        runner.setProperty(sessionProvider, CassandraCQLExecutionService.USERNAME, "admin");
+        runner.setProperty(sessionProvider, CassandraCQLExecutionService.PASSWORD, adminPassword);
+        runner.setProperty(sessionProvider, CassandraCQLExecutionService.CONTACT_POINTS, contactPoint);
+        runner.setProperty(sessionProvider, CassandraCQLExecutionService.DATACENTER, "datacenter1");
+        runner.setProperty(sessionProvider, CassandraCQLExecutionService.KEYSPACE, KEYSPACE);
+
+        runner.enableControllerService(sessionProvider);
+
+        session = CqlSession
+                .builder()
+                .addContactPoint(container.getContactPoint())
+                .withLocalDatacenter("datacenter1")
+                .build();
+    }
+
+    @AfterAll
+    public static void tearDown() throws Exception {
+        container.stop();
+    }
+
+    private RecordSchema getSchema() {
+        List<RecordField> fields = List.of(
+                new RecordField("sender", RecordFieldType.STRING.getDataType()),
+                new RecordField("receiver", RecordFieldType.STRING.getDataType()),
+                new RecordField("message", RecordFieldType.STRING.getDataType()),
+                new RecordField("when_sent", RecordFieldType.TIMESTAMP.getDataType())
+        );
+        return new SimpleRecordSchema(fields);
+    }
+
+    @Test
+    public void testInsertRecord() {
+        RecordSchema schema = getSchema();
+        Map<String, Object> rawRecord = new HashMap<>();
+        rawRecord.put("sender", "john.smith");
+        rawRecord.put("receiver", "jane.smith");
+        rawRecord.put("message", "hello");
+        rawRecord.put("when_sent", Instant.now());
+
+        MapRecord record = new MapRecord(schema, rawRecord);
+
+        assertDoesNotThrow(() -> sessionProvider.insert("message", record));
+    }
+
+    @Test
+    public void testIncrementAndDecrement() throws Exception {
+        RecordField field1 = new RecordField("column_a", RecordFieldType.STRING.getDataType());
+        RecordField field2 = new RecordField("increment_field", RecordFieldType.INT.getDataType());
+        RecordSchema schema = new SimpleRecordSchema(List.of(field1, field2));
+
+        Map<String, Object> map = new HashMap<>();
+        map.put("column_a", "abcdef");
+        map.put("increment_field", 1);
+
+        MapRecord record = new MapRecord(schema, map);
+
+        List<String> updateKeys = new ArrayList<>();
+        updateKeys.add("column_a");
+
+        // Set the initial value
+        sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.INCREMENT);
+
+        Thread.sleep(1000);
+
+        sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.INCREMENT);
+
+        ResultSet results = session.execute("select increment_field from testspace.counter_test where column_a = 'abcdef'");
+
+        Iterator<Row> rowIterator = results.iterator();
+
+        Row row = rowIterator.next();
+
+        assertEquals(2L, row.getLong("increment_field"));
+
+        sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.DECREMENT);
+
+        results = session.execute("select increment_field from testspace.counter_test where column_a = 'abcdef'");
+
+        rowIterator = results.iterator();
+
+        row = rowIterator.next();
+
+        assertEquals(1L, row.getLong("increment_field"));
+    }
+
+    @Test
+    public void testUpdateSet() throws Exception {
+        session.execute("insert into testspace.simple_set_test(username, is_active) values('john.smith', true)");
+        Thread.sleep(250);
+
+        RecordField field1 = new RecordField("username", RecordFieldType.STRING.getDataType());
+        RecordField field2 = new RecordField("is_active", RecordFieldType.BOOLEAN.getDataType());
+        RecordSchema schema = new SimpleRecordSchema(List.of(field1, field2));
+
+        Map<String, Object> map = new HashMap<>();
+        map.put("username", "john.smith");
+        map.put("is_active", false);
+
+        MapRecord record = new MapRecord(schema, map);
+
+        List<String> updateKeys = new ArrayList<>();
+        updateKeys.add("username");
+
+        sessionProvider.update("simple_set_test", record, updateKeys, UpdateMethod.SET);
+
+        Iterator<Row> iterator = session.execute("select is_active from testspace.simple_set_test where username = 'john.smith'").iterator();
+
+        Row row = iterator.next();
+
+        assertFalse(row.getBoolean("is_active"));
+    }
+
+    @Test
+    public void testQueryRecord() {
+        String[] statements = """
+                insert into testspace.query_test (column_a, column_b, when)
+                values ('abc', 'def', toTimestamp(now()));
+                insert into testspace.query_test (column_a, column_b, when)
+                values ('abc', 'ghi', toTimestamp(now()));
+                insert into testspace.query_test (column_a, column_b, when)
+                values ('abc', 'jkl', toTimestamp(now()));
+                """.trim().split("\\;");
+        for (String statement : statements) {
+            session.execute(statement);
+        }
+
+        List<Object> records = new ArrayList<>();
+        CQLQueryCallback callback = (rowNumber, result, fields, isExhausted) -> records.add(result);
+
+        sessionProvider.query("select * from testspace.query_test", false, null, callback);
+    }
+
+    private static void setCertificatePrivateKey() throws Exception {
+        final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
+        final KeyPair keyPair = keyPairGenerator.generateKeyPair();
+        final X509Certificate certificate = new StandardCertificateBuilder(keyPair, CERTIFICATE_ISSUER, Duration.ofDays(1))
+                .setDnsSubjectAlternativeNames(DNS_NAMES)
+                .build();
+
+        final Key key = keyPair.getPrivate();
+        final String keyEncoded = getKeyEncoded(key);
+        container.withCopyToContainer(Transferable.of(keyEncoded), CONTAINER_KEY_PATH);
+
+        final String certificateEncoded = getCertificateEncoded(certificate);
+        container.withCopyToContainer(Transferable.of(certificateEncoded), CONTAINER_CERTIFICATE_PATH);
+        writeCertificateEncoded(certificateEncoded);
+    }
+
+    private static String getCertificateEncoded(final Certificate certificate) throws Exception {
+        final byte[] certificateEncoded = certificate.getEncoded();
+        final String encoded = ENCODER.encodeToString(certificateEncoded);
+        return String.format(CERTIFICATE_FORMAT, encoded);
+    }
+
+    private static String getKeyEncoded(final Key key) {
+        final byte[] keyEncoded = key.getEncoded();
+        final String encoded = ENCODER.encodeToString(keyEncoded);
+        return String.format(KEY_FORMAT, encoded);
+    }
+
+    private static Path writeCertificateEncoded(final String certificateEncoded) throws IOException {
+        final Path certificateFile = Files.createTempFile(AbstractTestCassandraCQLExecutionService.class.getSimpleName(), ".crt");
+        Files.write(certificateFile, certificateEncoded.getBytes(StandardCharsets.UTF_8));
+        certificateFile.toFile().deleteOnExit();
+        return certificateFile;
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV3IT.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV3IT.java
new file mode 100644
index 000000000000..9e4eba5c27c1
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV3IT.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.cassandra;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.testcontainers.junit.jupiter.Testcontainers;
+
+@Testcontainers
+public class CassandraCQLExecutionServiceV3IT extends AbstractTestCassandraCQLExecutionService {
+    public static final String CASSANDRA_IMAGE = "cassandra:3.11";
+
+    @BeforeAll
+    public static void beforeAll() throws Exception {
+        setup(CASSANDRA_IMAGE);
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV4IT.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV4IT.java
new file mode 100644
index 000000000000..fc7aa41f6e62
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV4IT.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.cassandra;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.testcontainers.junit.jupiter.Testcontainers;
+
+@Testcontainers
+public class CassandraCQLExecutionServiceV4IT extends AbstractTestCassandraCQLExecutionService {
+    public static final String CASSANDRA_IMAGE = "cassandra:4.1";
+
+    @BeforeAll
+    public static void beforeAll() throws Exception {
+        setup(CASSANDRA_IMAGE);
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV5IT.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV5IT.java
new file mode 100644
index 000000000000..1248992a4c60
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/CassandraCQLExecutionServiceV5IT.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cassandra;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.testcontainers.junit.jupiter.Testcontainers;
+
+@Testcontainers
+public class CassandraCQLExecutionServiceV5IT extends AbstractTestCassandraCQLExecutionService {
+    public static final String CASSANDRA_IMAGE = "cassandra:5.0"; // Cassandra 5.x image; shared test logic lives in the base class
+
+    @BeforeAll
+    public static void beforeAll() throws Exception {
+        setup(CASSANDRA_IMAGE);
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/mock/MockCassandraProcessor.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/mock/MockCassandraProcessor.java
new file mode 100644
index 000000000000..fb1c4a54569a
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/java/org/apache/nifi/service/cassandra/mock/MockCassandraProcessor.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.cassandra.mock;
+
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.processor.AbstractProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.service.cassandra.CassandraCQLExecutionService;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Mock processor used only to host the CassandraCQLExecutionService controller service in tests.
+ */
+public class MockCassandraProcessor extends AbstractProcessor {
+    private static final PropertyDescriptor CASSANDRA_SESSION_PROVIDER = new PropertyDescriptor.Builder()
+            .name("Cassandra Session Provider")
+            .required(true)
+            .description("Controller Service to obtain a Cassandra connection session")
+            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+            .identifiesControllerService(CassandraCQLExecutionService.class)
+            .build();
+
+    @Override
+    public List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+        return Collections.singletonList(CASSANDRA_SESSION_PROVIDER);
+    }
+
+    @Override
+    public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
+        // Intentionally a no-op: the processor exists only to reference the controller service.
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/resources/init.cql b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/resources/init.cql
new file mode 100644
index 000000000000..a5b7be221fa2
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cassandra-session-provider-service/src/test/resources/init.cql
@@ -0,0 +1,47 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+CREATE KEYSPACE testspace WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1};
+
+CREATE TABLE testspace.message
+(
+    sender    text,
+    receiver  text,
+    message   text,
+    when_sent timestamp,
+    PRIMARY KEY ( sender, receiver, when_sent )
+);
+
+CREATE TABLE testspace.query_test
+(
+    column_a text,
+    column_b text,
+    when     timestamp,
+    PRIMARY KEY ( (column_a), column_b )
+);
+
+CREATE TABLE testspace.counter_test
+(
+    column_a        text,
+    increment_field counter,
+    PRIMARY KEY ( column_a )
+);
+
+CREATE TABLE testspace.simple_set_test
+(
+    username  text,
+    is_active boolean,
+    PRIMARY KEY ( username )
+);
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/pom.xml
new file mode 100644
index 000000000000..55fa60db153f
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/pom.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements. See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License. You may obtain a copy of the License at
+        http://www.apache.org/licenses/LICENSE-2.0
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-cql-bundle</artifactId>
+        <version>2.5.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>nifi-cql-nar</artifactId>
+    <packaging>nar</packaging>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>com.google.guava</groupId>
+                <artifactId>guava</artifactId>
+                <scope>provided</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-cql-services-api-nar</artifactId>
+            <version>2.5.0-SNAPSHOT</version>
+            <type>nar</type>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-cql-processors</artifactId>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/LICENSE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 000000000000..2672f6e36a1d
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,342 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+This product bundles 'libffi' which is available under an MIT style license.
+ libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
+ see https://github.com/java-native-access/jna/blob/master/native/libffi/LICENSE
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+This product bundles 'asm' which is available under a 3-Clause BSD style license.
+For details see http://asm.ow2.org/asmdex-license.html
+
+ Copyright (c) 2012 France Télécom
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+ The binary distribution of this product bundles 'Bouncy Castle JDK 1.5'
+ under an MIT style license.
+
+ Copyright (c) 2000 - 2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+The binary distribution of this product bundles 'JNR x86asm' under an MIT
+style license.
+
+ Copyright (C) 2010 Wayne Meissner
+ Copyright (c) 2008-2009, Petr Kobalicek
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ The binary distribution of this product bundles 'ParaNamer' and 'Paranamer Core'
+ which is available under a BSD style license.
+
+ Copyright (c) 2006 Paul Hammant & ThoughtWorks Inc
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/NOTICE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/NOTICE
new file mode 100644
index 000000000000..6bd6684a5156
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-nar/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,328 @@
+nifi-cql-nar
+Copyright 2016-2025 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+******************
+Apache Software License v2
+******************
+
+The following binary components are provided under the Apache Software License v2
+
+ (ASLv2) DataStax Java Driver for Apache Cassandra - Core
+ The following NOTICE information applies:
+ DataStax Java Driver for Apache Cassandra - Core
+ Copyright (C) 2012-2017 DataStax Inc.
+
+ (ASLv2) Apache Avro
+ The following NOTICE information applies:
+ Apache Avro
+ Copyright 2009-2017 The Apache Software Foundation
+
+ (ASLv2) Jackson JSON processor
+ The following NOTICE information applies:
+ # Jackson JSON processor
+
+ Jackson is a high-performance, Free/Open Source JSON processing library.
+ It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+ been in development since 2007.
+ It is currently developed by a community of developers, as well as supported
+ commercially by FasterXML.com.
+
+ ## Licensing
+
+ Jackson core and extension components may licensed under different licenses.
+ To find the details that apply to this artifact see the accompanying LICENSE file.
+ For more information, including possible other licensing options, contact
+ FasterXML.com (http://fasterxml.com).
+
+ ## Credits
+
+ A list of contributors may be found from CREDITS file, which is included
+ in some artifacts (usually source distributions); but is always available
+ from the source code management (SCM) system project uses.
+
+ (ASLv2) Apache Commons Codec
+ The following NOTICE information applies:
+ Apache Commons Codec
+ Copyright 2002-2014 The Apache Software Foundation
+
+ src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
+ contains test data from http://aspell.net/test/orig/batch0.tab.
+ Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+
+ ===============================================================================
+
+ The content of package org.apache.commons.codec.language.bm has been translated
+ from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+ with permission from the original authors.
+ Original source copyright:
+ Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
+
+ (ASLv2) Apache Commons Compress
+ The following NOTICE information applies:
+ Apache Commons Compress
+ Copyright 2002-2017 The Apache Software Foundation
+
+ The files in the package org.apache.commons.compress.archivers.sevenz
+ were derived from the LZMA SDK, version 9.20 (C/ and CPP/7zip/),
+ which has been placed in the public domain:
+
+ "LZMA SDK is placed in the public domain." (http://www.7-zip.org/sdk.html)
+
+ (ASLv2) Apache Commons IO
+ The following NOTICE information applies:
+ Apache Commons IO
+ Copyright 2002-2016 The Apache Software Foundation
+
+ (ASLv2) Apache Commons Lang
+ The following NOTICE information applies:
+ Apache Commons Lang
+ Copyright 2001-2017 The Apache Software Foundation
+
+ This product includes software from the Spring Framework,
+ under the Apache License 2.0 (see: StringUtils.containsWhitespace())
+
+ (ASLv2) Guava
+ The following NOTICE information applies:
+ Guava
+ Copyright 2015 The Guava Authors
+
+ (ASLv2) Dropwizard Metrics
+ The following NOTICE information applies:
+ Copyright (c) 2010-2013 Coda Hale, Yammer.com
+
+ This product includes software developed by Coda Hale and Yammer, Inc.
+
+ This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
+ LongAdder), which was released with the following comments:
+
+ Written by Doug Lea with assistance from members of JCP JSR-166
+ Expert Group and released to the public domain, as explained at
+ http://creativecommons.org/publicdomain/zero/1.0/
+
+ (ASLv2) The Netty Project
+ The following NOTICE information applies:
+ Copyright 2014 The Netty Project
+ -------------------------------------------------------------------------------
+ This product contains the extensions to Java Collections Framework which has
+ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+ * LICENSE:
+ * license/LICENSE.jsr166y.txt (Public Domain)
+ * HOMEPAGE:
+ * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+ This product contains a modified version of Robert Harder's Public Domain
+ Base64 Encoder and Decoder, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.base64.txt (Public Domain)
+ * HOMEPAGE:
+ * http://iharder.sourceforge.net/current/java/base64/
+
+ This product contains a modified portion of 'Webbit', an event based
+ WebSocket and HTTP server, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.webbit.txt (BSD License)
+ * HOMEPAGE:
+ * https://github.com/joewalnes/webbit
+
+ This product contains a modified portion of 'SLF4J', a simple logging
+ facade for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.slf4j.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.slf4j.org/
+
+ This product contains a modified portion of 'Apache Harmony', an open source
+ Java SE, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.harmony.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://archive.apache.org/dist/harmony/
+
+ This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+ and decompression library written by Matthew J. Francis. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jbzip2.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jbzip2/
+
+ This product contains a modified portion of 'libdivsufsort', a C API library to construct
+ the suffix array and the Burrows-Wheeler transformed string for any input string of
+ a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.libdivsufsort.txt (MIT License)
+ * HOMEPAGE:
+ * https://github.com/y-256/libdivsufsort
+
+ This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jctools.txt (ASL2 License)
+ * HOMEPAGE:
+ * https://github.com/JCTools/JCTools
+
+ This product optionally depends on 'JZlib', a re-implementation of zlib in
+ pure Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jzlib.txt (BSD style License)
+ * HOMEPAGE:
+ * http://www.jcraft.com/jzlib/
+
+ This product optionally depends on 'Compress-LZF', a Java library for encoding and
+ decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/ning/compress
+
+ This product optionally depends on 'lz4', a LZ4 Java compression
+ and decompression library written by Adrien Grand. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lz4.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jpountz/lz4-java
+
+ This product optionally depends on 'lzma-java', a LZMA Java compression
+ and decompression library, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lzma-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jponge/lzma-java
+
+ This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+ and decompression library written by William Kinney. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jfastlz.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jfastlz/
+
+ This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+ interchange format, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.protobuf.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/protobuf
+
+ This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+ a temporary self-signed X.509 certificate when the JVM does not provide the
+ equivalent functionality. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.bouncycastle.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.bouncycastle.org/
+
+ This product optionally depends on 'Snappy', a compression library produced
+ by Google Inc, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.snappy.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/snappy
+
+ This product optionally depends on 'JBoss Marshalling', an alternative Java
+ serialization API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
+ * HOMEPAGE:
+ * http://www.jboss.org/jbossmarshalling
+
+ This product optionally depends on 'Caliper', Google's micro-
+ benchmarking framework, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.caliper.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/google/caliper
+
+ This product optionally depends on 'Apache Log4J', a logging framework, which
+ can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.log4j.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://logging.apache.org/log4j/
+
+ This product optionally depends on 'Aalto XML', an ultra-high performance
+ non-blocking XML processor, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://wiki.fasterxml.com/AaltoHome
+
+ This product contains a modified version of 'HPACK', a Java implementation of
+ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.hpack.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/twitter/hpack
+
+ This product contains a modified portion of 'Apache Commons Lang', a Java library
+ provides utilities for the java.lang API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.commons-lang.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://commons.apache.org/proper/commons-lang/
+
+ This product contains a forked and modified version of Tomcat Native
+
+ * LICENSE:
+ * ASL2
+ * HOMEPAGE:
+ * http://tomcat.apache.org/native-doc/
+ * https://svn.apache.org/repos/asf/tomcat/native/
+
+ (ASLv2) Objenesis
+ The following NOTICE information applies:
+ Objenesis
+ Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita
+
+ (ASLv2) Snappy Java
+ The following NOTICE information applies:
+ This product includes software developed by Google
+ Snappy: http://code.google.com/p/snappy/ (New BSD License)
+
+ This product includes software developed by Apache
+ PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
+ (Apache 2.0 license)
+
+ This library contains statically linked libstdc++. This inclusion is allowed by
+ "GCC Runtime Library Exception"
+ http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
+
+************************
+Eclipse Public License 1.0
+************************
+
+The following binary components are provided under the Eclipse Public License 1.0. See project link for details.
+
+ (EPL 1.0) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
+
+*****************
+Public Domain
+*****************
+
+The following binary components are provided to the 'Public Domain'. See project link for details.
+
+ (Public Domain) XZ for Java (org.tukaani:xz:jar:1.5 - http://tukaani.org/xz/java.html)
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/pom.xml
new file mode 100644
index 000000000000..79c8164f8096
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/pom.xml
@@ -0,0 +1,98 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.nifi
+ nifi-cql-bundle
+ 2.5.0-SNAPSHOT
+
+
+ nifi-cql-processors
+ jar
+
+
+
+ org.apache.nifi
+ nifi-api
+
+
+ org.apache.nifi
+ nifi-utils
+
+
+ org.apache.nifi
+ nifi-properties
+
+
+ org.apache.nifi
+ nifi-ssl-context-service-api
+
+
+ org.apache.avro
+ avro
+
+
+ org.apache.nifi
+ nifi-cql-services-api
+ 2.5.0-SNAPSHOT
+ provided
+
+
+ org.apache.nifi
+ nifi-cassandra-session-provider-service
+ 2.5.0-SNAPSHOT
+ test
+
+
+ org.apache.nifi
+ nifi-record-serialization-service-api
+ compile
+
+
+ org.apache.nifi
+ nifi-record
+ compile
+
+
+
+ org.apache.nifi
+ nifi-mock
+
+
+ org.apache.nifi
+ nifi-mock-record-utils
+
+
+ org.apache.commons
+ commons-text
+
+
+
+ org.testcontainers
+ cassandra
+ ${testcontainers.version}
+ test
+
+
+ org.testcontainers
+ junit-jupiter
+ ${testcontainers.version}
+ test
+
+
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/AbstractCQLProcessor.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/AbstractCQLProcessor.java
new file mode 100644
index 000000000000..85fc9f2c1d21
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/AbstractCQLProcessor.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.cql;
+
+import org.apache.nifi.annotation.lifecycle.OnScheduled;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.processor.AbstractProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.Relationship;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * AbstractCassandraProcessor is a base class for Cassandra processors and contains logic and variables common to most
+ * processors integrating with Apache Cassandra.
+ */
+public abstract class AbstractCQLProcessor extends AbstractProcessor {
+
+ // Common descriptors
+ static final PropertyDescriptor CONNECTION_PROVIDER_SERVICE = new PropertyDescriptor.Builder()
+ .name("Cassandra Connection Provider")
+ .description("Specifies the Cassandra connection providing controller service to be used to connect to Cassandra cluster.")
+ .required(true)
+ .identifiesControllerService(CQLExecutionService.class)
+ .build();
+
+ static final PropertyDescriptor CHARSET = new PropertyDescriptor.Builder()
+ .name("Character Set")
+ .description("Specifies the character set of the record data.")
+ .required(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+ .defaultValue("UTF-8")
+ .addValidator(StandardValidators.CHARACTER_SET_VALIDATOR)
+ .build();
+
+ static final Relationship REL_SUCCESS = new Relationship.Builder()
+ .name("success")
+ .description("A FlowFile is transferred to this relationship if the operation completed successfully.")
+ .build();
+
+ static final Relationship REL_FAILURE = new Relationship.Builder()
+ .name("failure")
+ .description("A FlowFile is transferred to this relationship if the operation failed.")
+ .build();
+
+ static final Relationship REL_RETRY = new Relationship.Builder().name("retry")
+ .description("A FlowFile is transferred to this relationship if the operation cannot be completed but attempting "
+ + "it again may succeed.")
+ .build();
+
+ protected static List descriptors = new ArrayList<>();
+
+ static {
+ descriptors.add(CONNECTION_PROVIDER_SERVICE);
+ descriptors.add(CHARSET);
+ }
+
+ protected final AtomicReference cqlSessionService = new AtomicReference<>(null);
+
+ @OnScheduled
+ public void onScheduled(ProcessContext context) {
+ CQLExecutionService sessionProvider = context.getProperty(CONNECTION_PROVIDER_SERVICE).asControllerService(CQLExecutionService.class);
+ cqlSessionService.set(sessionProvider);
+ }
+
+ public void stop(ProcessContext context) {
+
+ }
+}
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryCallback.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryCallback.java
new file mode 100644
index 000000000000..adf6579efd64
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryCallback.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql;
+
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.flowfile.attributes.FragmentAttributes;
+import org.apache.nifi.logging.ComponentLog;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.serialization.RecordSetWriter;
+import org.apache.nifi.serialization.RecordSetWriterFactory;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.service.cql.api.CQLFieldInfo;
+import org.apache.nifi.service.cql.api.CQLQueryCallback;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import static org.apache.nifi.processors.cql.AbstractCQLProcessor.REL_SUCCESS;
+import static org.apache.nifi.processors.cql.ExecuteCQLQueryRecord.REL_ORIGINAL;
+
+public class ExecuteCQLQueryCallback implements CQLQueryCallback {
+ private final ProcessSession session;
+ private RecordSetWriterFactory writerFactory;
+ private RecordSetWriter recordWriter;
+ private ComponentLog logger;
+
+ private long currentIndex = 0;
+ private long rowsPerFlowFile;
+ private long flowFilesPerBatch;
+ private FlowFile parentFlowFile;
+
+ private boolean commitImmediately;
+ private final List flowFileBatch;
+ private FlowFile currentFlowFile;
+
+ private long recordsProcessed;
+ private int fragmentIndex;
+ private UUID fragmentId;
+
+ public ExecuteCQLQueryCallback(FlowFile parentFlowFile,
+ RecordSetWriterFactory writerFactory,
+ ProcessSession session,
+ ComponentLog logger,
+ long rowsPerFlowfile,
+ long flowFilesPerBatch) {
+ this.parentFlowFile = parentFlowFile;
+ this.writerFactory = writerFactory;
+ this.session = session;
+ this.logger = logger;
+
+ this.commitImmediately = flowFilesPerBatch > 0;
+ this.rowsPerFlowFile = rowsPerFlowfile;
+ this.flowFilesPerBatch = flowFilesPerBatch;
+
+ this.flowFileBatch = new ArrayList<>();
+ this.recordsProcessed = 0;
+ this.fragmentIndex = 0;
+ this.fragmentId = UUID.randomUUID();
+ }
+
+ private void updateFlowFileAttributes() {
+ Map attributes = Map.of(FragmentAttributes.FRAGMENT_COUNT.key(), String.valueOf(recordsProcessed),
+ FragmentAttributes.FRAGMENT_ID.key(), fragmentId.toString(),
+ FragmentAttributes.FRAGMENT_INDEX.key(), String.valueOf(fragmentIndex++),
+ "mime.type", recordWriter.getMimeType());
+
+ this.currentFlowFile = session.putAllAttributes(currentFlowFile, attributes);
+ this.recordsProcessed = 0;
+ flowFileBatch.add(currentFlowFile);
+ }
+
+ private void initWriter(RecordSchema schema) {
+ try {
+ if (recordWriter != null) {
+ recordWriter.finishRecordSet();
+ recordWriter.close();
+
+ updateFlowFileAttributes();
+
+ if (commitImmediately && flowFileBatch.size() == flowFilesPerBatch) {
+ session.transfer(flowFileBatch, REL_SUCCESS);
+
+ if (parentFlowFile != null) {
+ session.transfer(parentFlowFile, REL_ORIGINAL);
+ parentFlowFile = null;
+ }
+
+ session.commitAsync();
+ flowFileBatch.clear();
+ }
+ }
+
+ currentFlowFile = session.create();
+
+ recordWriter = writerFactory.createWriter(logger, schema, session.write(currentFlowFile), currentFlowFile);
+ recordWriter.beginRecordSet();
+ } catch (Exception ex) {
+ flowFileBatch.forEach(session::remove);
+
+ throw new ProcessException("Error creating record writer", ex);
+ }
+ }
+
+ @Override
+ public void receive(long rowNumber,
+ org.apache.nifi.serialization.record.Record result, List fields, boolean hasMore) {
+ if (recordWriter == null || ++currentIndex % rowsPerFlowFile == 0) {
+ initWriter(result.getSchema());
+ }
+
+ try {
+ recordWriter.write(result);
+ recordsProcessed++;
+
+ if (!hasMore) {
+ recordWriter.finishRecordSet();
+ recordWriter.close();
+
+ updateFlowFileAttributes();
+
+ if (parentFlowFile != null) {
+ session.transfer(parentFlowFile, REL_ORIGINAL);
+ }
+
+ session.transfer(flowFileBatch, REL_SUCCESS);
+ flowFileBatch.clear();
+ }
+
+ } catch (Exception ex) {
+ try {
+ recordWriter.close();
+ } catch (Exception e) {
+ logger.error("Error closing record writer", e);
+ }
+
+ if (!flowFileBatch.isEmpty()) {
+ flowFileBatch.forEach(session::remove);
+ }
+
+ throw new ProcessException("Error writing record", ex);
+ }
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecord.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecord.java
new file mode 100644
index 000000000000..a360867de81b
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecord.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.cql;
+
+import org.apache.nifi.annotation.behavior.InputRequirement;
+import org.apache.nifi.annotation.behavior.WritesAttribute;
+import org.apache.nifi.annotation.behavior.WritesAttributes;
+import org.apache.nifi.annotation.documentation.CapabilityDescription;
+import org.apache.nifi.annotation.documentation.Tags;
+import org.apache.nifi.annotation.lifecycle.OnScheduled;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.logging.ComponentLog;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.Relationship;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.serialization.RecordSetWriterFactory;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.exception.QueryFailureException;
+import org.apache.nifi.util.StopWatch;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.nifi.service.cql.api.CQLExecutionService.FETCH_SIZE;
+
+@Tags({"cassandra", "cql", "select"})
+@InputRequirement(InputRequirement.Requirement.INPUT_ALLOWED)
+@CapabilityDescription("Execute provided Cassandra Query Language (CQL) select query on a Cassandra 1.x, 2.x, or 3.0.x cluster. Query result "
+        + "may be converted to Avro or JSON format. Streaming is used so arbitrarily large result sets are supported. This processor can be "
+        + "scheduled to run on a timer, or cron expression, using the standard scheduling methods, or it can be triggered by an incoming FlowFile. "
+        + "If it is triggered by an incoming FlowFile, then attributes of that FlowFile will be available when evaluating the "
+        + "select query. FlowFile attribute 'executecql.row.count' indicates how many rows were selected.")
+@WritesAttributes({
+        @WritesAttribute(attribute = "fragment.identifier", description = "If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set "
+                + "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."),
+        @WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of "
+                + "FlowFiles produced by a single ResultSet. This can be used in conjunction with the "
+                + "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this "
+                + "attribute will not be populated."),
+        @WritesAttribute(attribute = "fragment.index", description = "If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of "
+                + "outgoing FlowFiles that were all derived from the same result set FlowFile. This can be "
+                + "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order "
+                + "FlowFiles were produced")
+})
+public class ExecuteCQLQueryRecord extends AbstractCQLProcessor {
+
+    // The CQL SELECT statement to run; Expression Language is evaluated against the incoming FlowFile.
+    public static final PropertyDescriptor CQL_SELECT_QUERY = new PropertyDescriptor.Builder()
+            .name("CQL select query")
+            .description("CQL select query")
+            .required(true)
+            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    // NOTE(review): this property is declared but never read in onTrigger — confirm the
+    // execution service applies the timeout, otherwise it is silently ignored.
+    public static final PropertyDescriptor QUERY_TIMEOUT = new PropertyDescriptor.Builder()
+            .name("Max Wait Time")
+            .description("The maximum amount of time allowed for a running CQL select query. Must be of format "
+                    + "<duration> <TimeUnit> where <duration> is a non-negative integer and TimeUnit is a supported "
+                    + "Time Unit, such as: nanos, millis, secs, mins, hrs, days. A value of zero means there is no limit. ")
+            .defaultValue("0 seconds")
+            .required(true)
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
+            .build();
+
+    public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder()
+            .name("Max Rows Per Flow File")
+            .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large "
+                    + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.")
+            .defaultValue("0")
+            .required(true)
+            .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+            .addValidator(StandardValidators.INTEGER_VALIDATOR)
+            .build();
+
+    public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder()
+            .name("Output Batch Size")
+            .description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows "
+                    + "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles "
+                    + "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will "
+                    + "be committed, thus releasing the FlowFiles to the downstream relationship. NOTE: The maxvalue.* and fragment.count attributes will not be set on FlowFiles when this "
+                    + "property is set.")
+            .defaultValue("0")
+            .required(true)
+            .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR)
+            .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+            .build();
+
+    public static final PropertyDescriptor OUTPUT_WRITER = new PropertyDescriptor.Builder()
+            .name("Result Set Output Writer")
+            .identifiesControllerService(RecordSetWriterFactory.class)
+            .required(true)
+            .description("The controller service to use for writing the results to a flowfile")
+            .build();
+
+    public static final Relationship REL_ORIGINAL = new Relationship.Builder()
+            .autoTerminateDefault(true)
+            .name("original")
+            .description("Input flowfiles go to this relationship on success and to failure when the query fails")
+            .build();
+
+    // FETCH_SIZE was previously listed twice; each descriptor must appear exactly once.
+    public static final List<PropertyDescriptor> PROPERTY_DESCRIPTORS = List.of(
+            CONNECTION_PROVIDER_SERVICE,
+            CHARSET,
+            OUTPUT_WRITER,
+            CQL_SELECT_QUERY,
+            FETCH_SIZE,
+            QUERY_TIMEOUT,
+            MAX_ROWS_PER_FLOW_FILE,
+            OUTPUT_BATCH_SIZE
+    );
+
+    public static final Set<Relationship> RELATIONSHIPS = Set.of(REL_SUCCESS, REL_ORIGINAL, REL_FAILURE, REL_RETRY);
+
+    @Override
+    public Set<Relationship> getRelationships() {
+        return RELATIONSHIPS;
+    }
+
+    @Override
+    public final List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+        return PROPERTY_DESCRIPTORS;
+    }
+
+    @OnScheduled
+    public void onScheduled(final ProcessContext context) {
+        super.onScheduled(context);
+    }
+
+    /**
+     * Executes the configured CQL SELECT query and streams the result rows through an
+     * {@code ExecuteCQLQueryCallback}, which writes record-oriented output FlowFiles.
+     * Query failures route the (possibly newly created) FlowFile to retry; other
+     * ProcessExceptions route to failure or yield the context when there is no input.
+     */
+    @Override
+    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
+        FlowFile fileToProcess = null;
+
+        if (context.hasIncomingConnection()) {
+            fileToProcess = session.get();
+
+            // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
+            // However, if we have no FlowFile and we have connections coming from other Processors, then
+            // we know that we should run only if we have a FlowFile.
+            if (fileToProcess == null && context.hasNonLoopConnection()) {
+                return;
+            }
+        }
+
+        final ComponentLog logger = getLogger();
+        final String selectQuery = context.getProperty(CQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess).getValue();
+        final long maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger();
+        final long outputBatchSize = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger();
+        // StopWatch(true) starts the watch; the previous explicit start() call was redundant.
+        final StopWatch stopWatch = new StopWatch(true);
+
+        final RecordSetWriterFactory writerFactory = context.getProperty(OUTPUT_WRITER).asControllerService(RecordSetWriterFactory.class);
+        // Renamed from "CQLExecutionService", which shadowed the type name and obscured reads.
+        final CQLExecutionService executionService = context.getProperty(CONNECTION_PROVIDER_SERVICE)
+                .asControllerService(CQLExecutionService.class);
+
+        try {
+            ExecuteCQLQueryCallback callback = new ExecuteCQLQueryCallback(fileToProcess, writerFactory, session,
+                    getLogger(), maxRowsPerFlowFile, outputBatchSize);
+
+            executionService.query(selectQuery, false, new ArrayList<>(), callback);
+
+            stopWatch.stop();
+
+            logger.debug("The query took {} seconds.", stopWatch.getDuration(TimeUnit.SECONDS));
+        } catch (final QueryFailureException qee) {
+            // The failure has already been logged by the client service; penalize and retry.
+            if (fileToProcess == null) {
+                fileToProcess = session.create();
+            }
+            fileToProcess = session.penalize(fileToProcess);
+            session.transfer(fileToProcess, REL_RETRY);
+        } catch (final ProcessException e) {
+            if (context.hasIncomingConnection()) {
+                // Parameterized logging avoids the eager String.format the original used.
+                logger.error("Unable to execute CQL select query {} for {} routing to failure", selectQuery, fileToProcess, e);
+                if (fileToProcess == null) {
+                    fileToProcess = session.create();
+                }
+                fileToProcess = session.penalize(fileToProcess);
+                session.transfer(fileToProcess, REL_FAILURE);
+
+            } else {
+                logger.error("Unable to execute CQL select query {}", selectQuery, e);
+                context.yield();
+            }
+        }
+        session.commitAsync();
+    }
+}
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/PutCQLRecord.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/PutCQLRecord.java
new file mode 100644
index 000000000000..32cb1643f9ef
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/PutCQLRecord.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.cql;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.nifi.annotation.behavior.InputRequirement;
+import org.apache.nifi.annotation.behavior.ReadsAttribute;
+import org.apache.nifi.annotation.behavior.ReadsAttributes;
+import org.apache.nifi.annotation.documentation.CapabilityDescription;
+import org.apache.nifi.annotation.documentation.Tags;
+import org.apache.nifi.annotation.lifecycle.OnShutdown;
+import org.apache.nifi.annotation.lifecycle.OnUnscheduled;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.components.ValidationContext;
+import org.apache.nifi.components.ValidationResult;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.Relationship;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.processors.cql.constants.BatchStatementType;
+import org.apache.nifi.processors.cql.constants.StatementType;
+import org.apache.nifi.processors.cql.constants.UpdateType;
+import org.apache.nifi.serialization.RecordReader;
+import org.apache.nifi.serialization.RecordReaderFactory;
+import org.apache.nifi.serialization.record.Record;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+import org.apache.nifi.util.StopWatch;
+
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Stream;
+
+import static java.lang.String.format;
+import static java.util.Collections.EMPTY_LIST;
+import static org.apache.nifi.processors.cql.constants.BatchStatementType.BATCH_STATEMENT_TYPE_USE_ATTR_TYPE;
+import static org.apache.nifi.processors.cql.constants.BatchStatementType.COUNTER_TYPE;
+import static org.apache.nifi.processors.cql.constants.BatchStatementType.LOGGED_TYPE;
+import static org.apache.nifi.processors.cql.constants.BatchStatementType.UNLOGGED_TYPE;
+import static org.apache.nifi.processors.cql.constants.StatementType.INSERT_TYPE;
+import static org.apache.nifi.processors.cql.constants.StatementType.STATEMENT_TYPE_USE_ATTR_TYPE;
+import static org.apache.nifi.processors.cql.constants.StatementType.UPDATE_TYPE;
+import static org.apache.nifi.processors.cql.constants.UpdateType.DECR_TYPE;
+import static org.apache.nifi.processors.cql.constants.UpdateType.INCR_TYPE;
+
+@Tags({"cassandra", "cql", "put", "insert", "update", "set", "record"})
+@InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
+@CapabilityDescription("This is a record aware processor that reads the content of the incoming FlowFile as individual records using the " +
+        "configured 'Record Reader' and writes them to Apache Cassandra using native protocol version 3 or higher.")
+@ReadsAttributes({
+        @ReadsAttribute(attribute = "cql.statement.type", description = "If 'Use cql.statement.type Attribute' is selected for the Statement " +
+                "Type property, the value of the cql.statement.type Attribute will be used to determine which type of statement (UPDATE, INSERT) " +
+                "will be generated and executed"),
+        @ReadsAttribute(attribute = "cql.update.method", description = "If 'Use cql.update.method Attribute' is selected for the Update " +
+                "Method property, the value of the cql.update.method Attribute will be used to determine which operation (Set, Increment, Decrement) " +
+                "will be used to generate and execute the Update statement. Ignored if the Statement Type property is not set to UPDATE"),
+        @ReadsAttribute(attribute = "cql.batch.statement.type", description = "If 'Use cql.batch.statement.type Attribute' is selected for the Batch " +
+                "Statement Type property, the value of the cql.batch.statement.type Attribute will be used to determine which type of batch statement " +
+                "(LOGGED, UNLOGGED, COUNTER) will be generated and executed")
+})
+public class PutCQLRecord extends AbstractCQLProcessor {
+    // FlowFile attribute names consulted when the matching property is set to its "use attribute" value.
+    static final String STATEMENT_TYPE_ATTRIBUTE = "cql.statement.type";
+
+    static final String UPDATE_METHOD_ATTRIBUTE = "cql.update.method";
+
+    static final String BATCH_STATEMENT_TYPE_ATTRIBUTE = "cql.batch.statement.type";
+
+    static final PropertyDescriptor RECORD_READER_FACTORY = new PropertyDescriptor.Builder()
+            .name("Record Reader")
+            .description("Specifies the type of Record Reader controller service to use for parsing the incoming data " +
+                    "and determining the schema")
+            .identifiesControllerService(RecordReaderFactory.class)
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor STATEMENT_TYPE = new PropertyDescriptor.Builder()
+            .name("Statement Type")
+            .description("Specifies the type of CQL Statement to generate.")
+            .required(true)
+            .defaultValue(INSERT_TYPE.getValue())
+            .allowableValues(StatementType.class)
+            .build();
+
+    static final PropertyDescriptor UPDATE_METHOD = new PropertyDescriptor.Builder()
+            .name("Update Method")
+            .description("Specifies the method to use to SET the values. This property is used if the Statement Type is " +
+                    "UPDATE and ignored otherwise.")
+            .required(false)
+            .defaultValue(UpdateType.SET_TYPE.getValue())
+            .allowableValues(UpdateType.class)
+            .build();
+
+    static final PropertyDescriptor UPDATE_KEYS = new PropertyDescriptor.Builder()
+            .name("Update Keys")
+            .description("A comma-separated list of column names that uniquely identifies a row in the database for UPDATE statements. "
+                    + "If the Statement Type is UPDATE and this property is not set, the conversion to CQL will fail. "
+                    + "This property is ignored if the Statement Type is not UPDATE.")
+            .addValidator(StandardValidators.createListValidator(true, false, StandardValidators.NON_EMPTY_VALIDATOR))
+            .required(false)
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    static final PropertyDescriptor TABLE = new PropertyDescriptor.Builder()
+            .name("Table name")
+            .description("The name of the Cassandra table to which the records have to be written.")
+            .required(true)
+            .addValidator(StandardValidators.NON_EMPTY_EL_VALIDATOR)
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    static final PropertyDescriptor BATCH_SIZE = new PropertyDescriptor.Builder()
+            .name("Batch size")
+            .description("Specifies the number of 'Insert statements' to be grouped together to execute as a batch (BatchStatement)")
+            .defaultValue("100")
+            .addValidator(StandardValidators.POSITIVE_INTEGER_VALIDATOR)
+            .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+            .required(true)
+            .build();
+
+    static final PropertyDescriptor BATCH_STATEMENT_TYPE = new PropertyDescriptor.Builder()
+            .name("Batch Statement Type")
+            .description("Specifies the type of 'Batch Statement' to be used.")
+            .allowableValues(BatchStatementType.class)
+            .defaultValue(LOGGED_TYPE.getValue())
+            .required(false)
+            .build();
+
+    private final static List<PropertyDescriptor> propertyDescriptors = List.of(
+            CONNECTION_PROVIDER_SERVICE, TABLE, STATEMENT_TYPE, UPDATE_KEYS, UPDATE_METHOD,
+            RECORD_READER_FACTORY, BATCH_SIZE, BATCH_STATEMENT_TYPE);
+
+    private final static Set<Relationship> relationships = Set.of(REL_SUCCESS, REL_FAILURE);
+
+    @Override
+    protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+        return propertyDescriptors;
+    }
+
+    @Override
+    public Set<Relationship> getRelationships() {
+        return relationships;
+    }
+
+    /**
+     * Reads the incoming FlowFile record-by-record and writes the records to Cassandra in
+     * batches of the configured size, as either INSERT or UPDATE statements. On any error the
+     * FlowFile is routed to failure; on success a provenance SEND event reporting the number
+     * of records written is emitted and the FlowFile is routed to success.
+     */
+    @Override
+    public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
+        FlowFile inputFlowFile = session.get();
+
+        if (inputFlowFile == null) {
+            return;
+        }
+
+        final String cassandraTable = context.getProperty(TABLE).evaluateAttributeExpressions(inputFlowFile).getValue();
+        final RecordReaderFactory recordParserFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
+        final int batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
+        final String updateKeys = context.getProperty(UPDATE_KEYS).evaluateAttributeExpressions(inputFlowFile).getValue();
+
+        // Get the statement type from the attribute if necessary
+        final String statementTypeProperty = context.getProperty(STATEMENT_TYPE).getValue();
+        String statementType = statementTypeProperty;
+        if (STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equals(statementTypeProperty)) {
+            statementType = inputFlowFile.getAttribute(STATEMENT_TYPE_ATTRIBUTE);
+        }
+
+        // Get the update method from the attribute if necessary
+        final String updateMethodProperty = context.getProperty(UPDATE_METHOD).getValue();
+        String updateMethod = updateMethodProperty;
+        if (UpdateType.UPDATE_METHOD_USE_ATTR_TYPE.getValue().equals(updateMethodProperty)) {
+            updateMethod = inputFlowFile.getAttribute(UPDATE_METHOD_ATTRIBUTE);
+        }
+
+        // Get the batch statement type from the attribute if necessary.
+        // Null-check before toUpperCase(): the original threw NullPointerException when the
+        // attribute was absent instead of the intended IllegalArgumentException below.
+        final String batchStatementTypeProperty = context.getProperty(BATCH_STATEMENT_TYPE).getValue();
+        String batchStatementType = batchStatementTypeProperty;
+        if (BATCH_STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equals(batchStatementTypeProperty)) {
+            final String attributeValue = inputFlowFile.getAttribute(BATCH_STATEMENT_TYPE_ATTRIBUTE);
+            batchStatementType = attributeValue == null ? null : attributeValue.toUpperCase();
+        }
+        // NOTE(review): this throw happens outside the try below, so the FlowFile is not routed
+        // to failure in this case — confirm rollback is the intended behavior.
+        if (StringUtils.isEmpty(batchStatementType)) {
+            throw new IllegalArgumentException(format("Batch Statement Type is not specified, FlowFile %s", inputFlowFile));
+        }
+
+        final AtomicInteger recordsAdded = new AtomicInteger(0);
+        final StopWatch stopWatch = new StopWatch(true);
+
+        boolean error = false;
+
+        try (final InputStream inputStream = session.read(inputFlowFile);
+             final RecordReader reader = recordParserFactory.createRecordReader(inputFlowFile, inputStream, getLogger())) {
+
+            // throw an exception if statement type is not set
+            if (StringUtils.isEmpty(statementType)) {
+                throw new IllegalArgumentException(format("Statement Type is not specified, FlowFile %s", inputFlowFile));
+            }
+
+            final boolean isUpdate = UPDATE_TYPE.getValue().equalsIgnoreCase(statementType);
+
+            // throw an exception if the statement type is set to update and updateKeys is empty
+            if (isUpdate && StringUtils.isEmpty(updateKeys)) {
+                throw new IllegalArgumentException(format("Update Keys are not specified, FlowFile %s", inputFlowFile));
+            }
+
+            final List<String> updateKeyNames = isUpdate
+                    ? Stream.of(updateKeys.split(","))
+                            .map(String::trim)
+                            .filter(StringUtils::isNotEmpty)
+                            .toList()
+                    : Collections.emptyList();
+
+            // throw an exception if the Update Method is Increment or Decrement and the batch statement type is not UNLOGGED or COUNTER
+            if (INCR_TYPE.getValue().equalsIgnoreCase(updateMethod) || DECR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
+                if (!(UNLOGGED_TYPE.getValue().equalsIgnoreCase(batchStatementType) || COUNTER_TYPE.getValue().equalsIgnoreCase(batchStatementType))) {
+                    throw new IllegalArgumentException(format("Increment/Decrement Update Method can only be used with COUNTER " +
+                            "or UNLOGGED Batch Statement Type, FlowFile %s", inputFlowFile));
+                }
+            }
+
+            final CQLExecutionService sessionProviderService = super.cqlSessionService.get();
+            final List<Record> recordsBatch = new ArrayList<>();
+            Record record;
+
+            while ((record = reader.nextRecord()) != null) {
+                recordsBatch.add(record);
+
+                if (recordsBatch.size() == batchSize) {
+                    flushBatch(sessionProviderService, cassandraTable, recordsBatch, isUpdate, updateKeyNames, updateMethod, recordsAdded);
+                }
+            }
+
+            if (!recordsBatch.isEmpty()) {
+                flushBatch(sessionProviderService, cassandraTable, recordsBatch, isUpdate, updateKeyNames, updateMethod, recordsAdded);
+            }
+
+        } catch (Exception e) {
+            error = true;
+            getLogger().error("Unable to write the records into Cassandra table", e);
+            session.transfer(inputFlowFile, REL_FAILURE);
+        } finally {
+            if (!error) {
+                stopWatch.stop();
+                long duration = stopWatch.getDuration(TimeUnit.MILLISECONDS);
+
+                // recordsAdded is now maintained by flushBatch; previously it was never
+                // incremented and provenance always reported "Inserted 0 records".
+                session.getProvenanceReporter().send(inputFlowFile, super.cqlSessionService.get().getTransitUrl(cassandraTable),
+                        "Inserted " + recordsAdded.get() + " records", duration);
+                session.transfer(inputFlowFile, REL_SUCCESS);
+            }
+        }
+
+    }
+
+    // Sends the accumulated batch to Cassandra as UPDATE or INSERT, counts the records, and clears the batch.
+    // NOTE(review): assumes UpdateType values map one-to-one onto UpdateMethod enum names — confirm.
+    private void flushBatch(CQLExecutionService service, String table, List<Record> batch, boolean isUpdate,
+                            List<String> updateKeyNames, String updateMethod, AtomicInteger recordsAdded) {
+        if (isUpdate) {
+            service.update(table, batch, updateKeyNames, UpdateMethod.valueOf(updateMethod));
+        } else {
+            service.insert(table, batch);
+        }
+        recordsAdded.addAndGet(batch.size());
+        batch.clear();
+    }
+
+    /**
+     * Adds UPDATE-specific validation on top of the superclass rules: Update Keys must be set,
+     * and Increment/Decrement update methods require a COUNTER or UNLOGGED batch statement type
+     * (or the "use attribute" setting, which cannot be validated until runtime).
+     */
+    @Override
+    protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
+        // Copy into a fresh collection rather than blindly down-casting the superclass result to Set,
+        // which would throw ClassCastException if the superclass ever returned a different Collection.
+        final Collection<ValidationResult> results = new ArrayList<>(super.customValidate(validationContext));
+
+        String statementType = validationContext.getProperty(STATEMENT_TYPE).getValue();
+
+        if (UPDATE_TYPE.getValue().equalsIgnoreCase(statementType)) {
+            // Check that update keys are set
+            String updateKeys = validationContext.getProperty(UPDATE_KEYS).getValue();
+            if (StringUtils.isEmpty(updateKeys)) {
+                results.add(new ValidationResult.Builder().subject("Update statement configuration").valid(false).explanation(
+                        "if the Statement Type is set to Update, then the Update Keys must be specified as well").build());
+            }
+
+            // Check that if the update method is set to increment or decrement that the batch statement type is set to
+            // unlogged or counter (or USE_ATTR_TYPE, which we cannot check at this point).
+            String updateMethod = validationContext.getProperty(UPDATE_METHOD).getValue();
+            String batchStatementType = validationContext.getProperty(BATCH_STATEMENT_TYPE).getValue();
+            if (INCR_TYPE.getValue().equalsIgnoreCase(updateMethod)
+                    || DECR_TYPE.getValue().equalsIgnoreCase(updateMethod)) {
+                if (!(COUNTER_TYPE.getValue().equalsIgnoreCase(batchStatementType)
+                        || UNLOGGED_TYPE.getValue().equalsIgnoreCase(batchStatementType)
+                        || BATCH_STATEMENT_TYPE_USE_ATTR_TYPE.getValue().equalsIgnoreCase(batchStatementType))) {
+                    results.add(new ValidationResult.Builder().subject("Update method configuration").valid(false).explanation(
+                            "if the Update Method is set to Increment or Decrement, then the Batch Statement Type must be set " +
+                                    "to either COUNTER or UNLOGGED").build());
+                }
+            }
+        }
+
+        return results;
+    }
+
+    // Delegate lifecycle teardown to the shared superclass cleanup.
+    @OnUnscheduled
+    public void stop(ProcessContext context) {
+        super.stop(context);
+    }
+
+    @OnShutdown
+    public void shutdown(ProcessContext context) {
+        super.stop(context);
+    }
+
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/BatchStatementType.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/BatchStatementType.java
new file mode 100644
index 000000000000..1c5205c0f465
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/BatchStatementType.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql.constants;
+
+import org.apache.nifi.components.DescribedValue;
+
+/**
+ * Allowable values for the 'Batch Statement Type' property: the Cassandra batch mode to use,
+ * or a sentinel indicating the value comes from the cql.batch.statement.type FlowFile attribute.
+ */
+public enum BatchStatementType implements DescribedValue {
+    LOGGED_TYPE("LOGGED", "LOGGED",
+            "Use a LOGGED batch statement"),
+    UNLOGGED_TYPE("UNLOGGED", "UNLOGGED",
+            "Use an UNLOGGED batch statement"),
+    COUNTER_TYPE("COUNTER", "COUNTER",
+            "Use a COUNTER batch statement"),
+    BATCH_STATEMENT_TYPE_USE_ATTR_TYPE("USE_ATTR", "Use cql.batch.statement.type Attribute",
+            "The value of the cql.batch.statement.type Attribute will be used to determine which type of batch statement (LOGGED, UNLOGGED or COUNTER) " +
+                    "will be used to generate and execute the Update statement.");
+
+    // Enum constants are shared singletons; fields are final to make the immutability explicit.
+    private final String value;
+    private final String displayName;
+    private final String description;
+
+    BatchStatementType(String value, String displayName, String description) {
+        this.value = value;
+        this.displayName = displayName;
+        this.description = description;
+    }
+
+    @Override
+    public String getValue() {
+        return value;
+    }
+
+    @Override
+    public String getDisplayName() {
+        return displayName;
+    }
+
+    @Override
+    public String getDescription() {
+        return description;
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/StatementType.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/StatementType.java
new file mode 100644
index 000000000000..889114fb0c68
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/StatementType.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql.constants;
+
+import org.apache.nifi.components.DescribedValue;
+
+/**
+ * Allowable values for the 'Statement Type' property: the kind of CQL statement to generate,
+ * or a sentinel indicating the value comes from the cql.statement.type FlowFile attribute.
+ */
+public enum StatementType implements DescribedValue {
+    UPDATE_TYPE("UPDATE", "UPDATE",
+            "Use an UPDATE statement."),
+    INSERT_TYPE("INSERT", "INSERT",
+            "Use an INSERT statement."),
+    STATEMENT_TYPE_USE_ATTR_TYPE("USE_ATTR", "Use cql.statement.type Attribute",
+            "The value of the cql.statement.type Attribute will be used to determine which type of statement (UPDATE, INSERT) " +
+                    "will be generated and executed");
+
+    // Enum constants are shared singletons; fields are final to make the immutability explicit.
+    private final String value;
+    private final String displayName;
+    private final String description;
+
+    StatementType(String value, String displayName, String description) {
+        this.value = value;
+        this.displayName = displayName;
+        this.description = description;
+    }
+
+    @Override
+    public String getValue() {
+        return value;
+    }
+
+    @Override
+    public String getDisplayName() {
+        return displayName;
+    }
+
+    @Override
+    public String getDescription() {
+        return description;
+    }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/UpdateType.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/UpdateType.java
new file mode 100644
index 000000000000..c09a93fa285b
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/java/org/apache/nifi/processors/cql/constants/UpdateType.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql.constants;
+
+import org.apache.nifi.components.DescribedValue;
+
+public enum UpdateType implements DescribedValue {
+ INCR_TYPE("INCREMENT", "Increment",
+ "Use an increment operation (+=) for the Update statement."),
+ SET_TYPE("SET", "Set",
+ "Use a set operation (=) for the Update statement."),
+ DECR_TYPE("DECREMENT", "Decrement",
+ "Use a decrement operation (-=) for the Update statement."),
+ UPDATE_METHOD_USE_ATTR_TYPE("USE_ATTR", "Use cql.update.method Attribute",
+ "The value of the cql.update.method Attribute will be used to determine which operation (Set, Increment, Decrement) " +
+ "will be used to generate and execute the Update statement.");
+ private String value;
+ private String displayName;
+ private String description;
+
+ UpdateType(String value, String displayName, String description) {
+ this.value = value;
+ this.displayName = displayName;
+ this.description = description;
+ }
+
+ @Override
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String getDisplayName() {
+ return displayName;
+ }
+
+ @Override
+ public String getDescription() {
+ return description;
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor
new file mode 100644
index 000000000000..de9057cf2797
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+org.apache.nifi.processors.cql.ExecuteCQLQueryRecord
+org.apache.nifi.processors.cql.PutCQLRecord
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecordTest.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecordTest.java
new file mode 100644
index 000000000000..31e3e9c0ea96
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/ExecuteCQLQueryRecordTest.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql;
+
+import org.apache.nifi.flowfile.attributes.FragmentAttributes;
+import org.apache.nifi.processors.cql.mock.MockCQLQueryExecutionService;
+import org.apache.nifi.serialization.SimpleRecordSchema;
+import org.apache.nifi.serialization.record.MapRecord;
+import org.apache.nifi.serialization.record.MockRecordWriter;
+import org.apache.nifi.serialization.record.Record;
+import org.apache.nifi.serialization.record.RecordField;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.util.MockFlowFile;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class ExecuteCQLQueryRecordTest {
+ private TestRunner testRunner;
+
+ @BeforeEach
+ public void setup() {
+ testRunner = TestRunners.newTestRunner(ExecuteCQLQueryRecord.class);
+ testRunner.setProperty(ExecuteCQLQueryRecord.CQL_SELECT_QUERY, "SELECT * FROM cql_table");
+ }
+
+ @DisplayName("Verify the normal write behavior with different record counts and expected batch sizes")
+ @ParameterizedTest(name = "rows per file={0},generated records={1},expected ff count={2}")
+ @CsvSource({
+ "5,4,1,0",
+ "5,100,20,0",
+ "5,101,21,0",
+ "5,100,20,1"
+ })
+ public void testSimpleWriteScenario(int rowsPerFlowFile, int recordCount,
+ int expectedFlowFileCount,
+ int outputBatchSize) throws Exception {
+ testRunner.setProperty(ExecuteCQLQueryRecord.MAX_ROWS_PER_FLOW_FILE, String.valueOf(rowsPerFlowFile));
+ testRunner.setProperty(ExecuteCQLQueryRecord.OUTPUT_BATCH_SIZE, String.valueOf(outputBatchSize));
+
+ MockRecordWriter parser = new MockRecordWriter();
+ RecordField field1 = new RecordField("a", RecordFieldType.STRING.getDataType());
+ RecordField field2 = new RecordField("b", RecordFieldType.STRING.getDataType());
+ SimpleRecordSchema schema = new SimpleRecordSchema(List.of(field1, field2));
+
+ ArrayList data = new ArrayList<>();
+
+ for (int i = 0; i < recordCount; i++) {
+ Map rec = Map.of("a", UUID.randomUUID().toString(), "b", UUID.randomUUID().toString());
+ MapRecord testData = new MapRecord(schema, rec);
+ data.add(testData);
+ }
+
+ MockCQLQueryExecutionService service = new MockCQLQueryExecutionService(data.iterator());
+
+ testRunner.setProperty(ExecuteCQLQueryRecord.CONNECTION_PROVIDER_SERVICE, "connection");
+ testRunner.addControllerService("connection", service);
+ testRunner.enableControllerService(service);
+ testRunner.setProperty(ExecuteCQLQueryRecord.OUTPUT_WRITER, "writer");
+ testRunner.addControllerService("writer", parser);
+ testRunner.enableControllerService(parser);
+ testRunner.assertValid();
+
+ testRunner.enqueue("parent_file");
+ testRunner.run();
+
+ testRunner.assertTransferCount(ExecuteCQLQueryRecord.REL_ORIGINAL, 1);
+ testRunner.assertTransferCount(ExecuteCQLQueryRecord.REL_SUCCESS, expectedFlowFileCount);
+
+ List flowFiles = testRunner.getFlowFilesForRelationship(ExecuteCQLQueryRecord.REL_SUCCESS);
+ assertEquals(expectedFlowFileCount, flowFiles.size());
+ flowFiles.forEach(ff -> {
+ assertNotNull(ff.getAttribute(FragmentAttributes.FRAGMENT_ID.key()));
+ assertNotNull(ff.getAttribute(FragmentAttributes.FRAGMENT_COUNT.key()));
+ assertNotNull(ff.getAttribute(FragmentAttributes.FRAGMENT_INDEX.key()));
+
+ int fragmentIndex = Integer.parseInt(ff.getAttribute(FragmentAttributes.FRAGMENT_INDEX.key()));
+ int rowCount = Integer.parseInt(ff.getAttribute(FragmentAttributes.FRAGMENT_COUNT.key()));
+ String fragmentId = ff.getAttribute(FragmentAttributes.FRAGMENT_ID.key());
+
+ assertTrue(fragmentIndex < expectedFlowFileCount);
+ assertTrue(rowCount > 0 && rowCount <= rowsPerFlowFile);
+
+ assertDoesNotThrow(() -> UUID.fromString(fragmentId));
+ });
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/PutCQLRecordTest.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/PutCQLRecordTest.java
new file mode 100644
index 000000000000..ef502e3d6dc5
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/PutCQLRecordTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql;
+
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.serialization.record.MockRecordParser;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.mockito.Mockito;
+
+import java.util.List;
+
+import static org.apache.nifi.processors.cql.constants.StatementType.UPDATE_TYPE;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class PutCQLRecordTest {
+ private TestRunner runner;
+ private CQLExecutionService service;
+ private MockRecordParser mockReader;
+
+ @BeforeEach
+ void setUp() throws Exception {
+ runner = TestRunners.newTestRunner(PutCQLRecord.class);
+ service = Mockito.mock(CQLExecutionService.class);
+ mockReader = new MockRecordParser();
+ mockReader.addSchemaField("message", RecordFieldType.STRING);
+ mockReader.addSchemaField("sender", RecordFieldType.STRING);
+
+ when(service.getIdentifier()).thenReturn("executionService");
+
+ runner.setProperty(PutCQLRecord.CONNECTION_PROVIDER_SERVICE, service.getIdentifier());
+ runner.setProperty(PutCQLRecord.TABLE, "message");
+ runner.setProperty(PutCQLRecord.RECORD_READER_FACTORY, "reader");
+
+ runner.addControllerService("reader", mockReader);
+ runner.enableControllerService(mockReader);
+
+ runner.addControllerService(service.getIdentifier(), service);
+ runner.enableControllerService(service);
+ }
+
+ @Test
+ void testInsert() {
+ final int recordCount = 1000;
+ final int batchCount = 10;
+
+ runner.setProperty(PutCQLRecord.BATCH_SIZE, "100");
+
+ for (int i = 0; i < recordCount; i++) {
+ mockReader.addRecord("Hello, world", "test_user");
+ }
+
+ runner.enqueue("");
+ runner.run();
+
+ runner.assertAllFlowFilesTransferred(PutCQLRecord.REL_SUCCESS, 1);
+
+ verify(service, times(batchCount))
+ .insert(eq("message"), anyList());
+ }
+
+ @ParameterizedTest
+ @CsvSource({ "DECREMENT,COUNTER", "INCREMENT,COUNTER", "SET,LOGGED" })
+ void testUpdate(UpdateMethod updateMethod, String batchStatementType) {
+ runner.setProperty(PutCQLRecord.UPDATE_KEYS, "sender");
+ runner.setProperty(PutCQLRecord.STATEMENT_TYPE, UPDATE_TYPE.getValue());
+ runner.setProperty(PutCQLRecord.UPDATE_METHOD, updateMethod.name());
+ runner.setProperty(PutCQLRecord.BATCH_STATEMENT_TYPE, batchStatementType);
+
+ final int recordCount = 1050;
+ final int batchCount = 11;
+
+ runner.setProperty(PutCQLRecord.BATCH_SIZE, "100");
+
+ for (int i = 0; i < recordCount; i++) {
+ mockReader.addRecord("Hello, world", "test_user");
+ }
+
+ runner.enqueue("");
+ runner.run();
+
+ runner.assertAllFlowFilesTransferred(PutCQLRecord.REL_SUCCESS, 1);
+
+ verify(service, times(batchCount))
+ .update(eq("message"), anyList(), eq(List.of("sender")), eq(updateMethod));
+ }
+
+ @Test
+ void testErrorHandler() {
+ final int recordCount = 1;
+
+ runner.setProperty(PutCQLRecord.BATCH_SIZE, "100");
+
+ for (int i = 0; i < recordCount; i++) {
+ mockReader.addRecord("Hello, world", "test_user");
+ }
+
+ doThrow(new ProcessException("Test"))
+ .when(service)
+ .insert(anyString(), anyList());
+ runner.enqueue("");
+ runner.run();
+ runner.assertAllFlowFilesTransferred(PutCQLRecord.REL_FAILURE, 1);
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/mock/MockCQLQueryExecutionService.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/mock/MockCQLQueryExecutionService.java
new file mode 100644
index 000000000000..d648f08c0d1d
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-processors/src/test/java/org/apache/nifi/processors/cql/mock/MockCQLQueryExecutionService.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.processors.cql.mock;
+
+import org.apache.nifi.controller.AbstractControllerService;
+import org.apache.nifi.serialization.record.Record;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.CQLQueryCallback;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * This test class exists to support {@link org.apache.nifi.processors.cql.ExecuteCQLQueryRecordTest}.
+ */
+public class MockCQLQueryExecutionService extends AbstractControllerService implements CQLExecutionService {
+ private final Iterator records;
+
+ public MockCQLQueryExecutionService(Iterator records) {
+ this.records = records;
+ }
+
+ @Override
+ public void query(String cql, boolean cacheStatement, List parameters, CQLQueryCallback callback) {
+ long rowNumber = 1;
+ while (records.hasNext()) {
+ callback.receive(rowNumber++, records.next(), new ArrayList<>(), records.hasNext());
+ }
+ }
+
+ @Override
+ public void insert(String table, org.apache.nifi.serialization.record.Record record) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void insert(String table, List records) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getTransitUrl(String tableName) {
+ return "";
+ }
+
+ @Override
+ public void delete(String cassandraTable, org.apache.nifi.serialization.record.Record record, List updateKeys) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void update(String cassandraTable, org.apache.nifi.serialization.record.Record record, List updateKeys, UpdateMethod updateMethod) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void update(String cassandraTable, List records, List updateKeys, UpdateMethod updateMethod) {
+ throw new UnsupportedOperationException();
+ }
+}
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/pom.xml
new file mode 100644
index 000000000000..cd73c19e74e3
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/pom.xml
@@ -0,0 +1,41 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-cql-services-api-nar
+ nar
+
+
+
+ org.apache.nifi
+ nifi-standard-shared-nar
+ 2.5.0-SNAPSHOT
+ nar
+
+
+ org.apache.nifi
+ nifi-cql-services-api
+ 2.5.0-SNAPSHOT
+ compile
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/LICENSE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 000000000000..1fb4e7c0f39f
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,266 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for the these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+This product bundles 'asm' which is available under a 3-Clause BSD style license.
+For details see http://asm.ow2.org/asmdex-license.html
+
+ Copyright (c) 2012 France Télécom
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+The binary distribution of this product bundles 'JNR x86asm' under an MIT
+style license.
+
+ Copyright (C) 2010 Wayne Meissner
+ Copyright (c) 2008-2009, Petr Kobalicek
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/NOTICE b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/NOTICE
new file mode 100644
index 000000000000..2d9d2aa14743
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api-nar/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,227 @@
+nifi-cql-services-api-nar
+Copyright 2016-2020 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+******************
+Apache Software License v2
+******************
+
+ (ASLv2) The Netty Project
+ The following NOTICE information applies:
+ Copyright 2014 The Netty Project
+ -------------------------------------------------------------------------------
+ This product contains the extensions to Java Collections Framework which has
+ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+ * LICENSE:
+ * license/LICENSE.jsr166y.txt (Public Domain)
+ * HOMEPAGE:
+ * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+ This product contains a modified version of Robert Harder's Public Domain
+ Base64 Encoder and Decoder, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.base64.txt (Public Domain)
+ * HOMEPAGE:
+ * http://iharder.sourceforge.net/current/java/base64/
+
+ This product contains a modified portion of 'Webbit', an event based
+ WebSocket and HTTP server, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.webbit.txt (BSD License)
+ * HOMEPAGE:
+ * https://github.com/joewalnes/webbit
+
+ This product contains a modified portion of 'SLF4J', a simple logging
+ facade for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.slf4j.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.slf4j.org/
+
+ This product contains a modified portion of 'Apache Harmony', an open source
+ Java SE, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.harmony.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://archive.apache.org/dist/harmony/
+
+ This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+ and decompression library written by Matthew J. Francis. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jbzip2.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jbzip2/
+
+ This product contains a modified portion of 'libdivsufsort', a C API library to construct
+ the suffix array and the Burrows-Wheeler transformed string for any input string of
+ a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.libdivsufsort.txt (MIT License)
+ * HOMEPAGE:
+ * https://github.com/y-256/libdivsufsort
+
+ This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jctools.txt (ASL2 License)
+ * HOMEPAGE:
+ * https://github.com/JCTools/JCTools
+
+ This product optionally depends on 'JZlib', a re-implementation of zlib in
+ pure Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jzlib.txt (BSD style License)
+ * HOMEPAGE:
+ * http://www.jcraft.com/jzlib/
+
+ This product optionally depends on 'Compress-LZF', a Java library for encoding and
+ decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/ning/compress
+
+ This product optionally depends on 'lz4', a LZ4 Java compression
+ and decompression library written by Adrien Grand. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lz4.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jpountz/lz4-java
+
+ This product optionally depends on 'lzma-java', a LZMA Java compression
+ and decompression library, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lzma-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jponge/lzma-java
+
+ This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+ and decompression library written by William Kinney. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jfastlz.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jfastlz/
+
+ This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+ interchange format, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.protobuf.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/protobuf
+
+ This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+ a temporary self-signed X.509 certificate when the JVM does not provide the
+ equivalent functionality. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.bouncycastle.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.bouncycastle.org/
+
+ This product optionally depends on 'Snappy', a compression library produced
+ by Google Inc, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.snappy.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/snappy
+
+ This product optionally depends on 'JBoss Marshalling', an alternative Java
+ serialization API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
+ * HOMEPAGE:
+ * http://www.jboss.org/jbossmarshalling
+
+ This product optionally depends on 'Caliper', Google's micro-
+ benchmarking framework, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.caliper.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/google/caliper
+
+ This product optionally depends on 'Apache Log4J', a logging framework, which
+ can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.log4j.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://logging.apache.org/log4j/
+
+ This product optionally depends on 'Aalto XML', an ultra-high performance
+ non-blocking XML processor, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://wiki.fasterxml.com/AaltoHome
+
+ This product contains a modified version of 'HPACK', a Java implementation of
+ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.hpack.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/twitter/hpack
+
+ This product contains a modified portion of 'Apache Commons Lang', a Java library
+ provides utilities for the java.lang API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.commons-lang.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://commons.apache.org/proper/commons-lang/
+
+ This product contains a forked and modified version of Tomcat Native
+
+ * LICENSE:
+ * ASL2
+ * HOMEPAGE:
+ * http://tomcat.apache.org/native-doc/
+ * https://svn.apache.org/repos/asf/tomcat/native/
+
+ (ASLv2) Guava
+ The following NOTICE information applies:
+ Guava
+ Copyright 2015 The Guava Authors
+
+ (ASLv2) Dropwizard Metrics
+ The following NOTICE information applies:
+ Copyright (c) 2010-2013 Coda Hale, Yammer.com
+
+ This product includes software developed by Coda Hale and Yammer, Inc.
+
+ This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
+ LongAdder), which was released with the following comments:
+
+ Written by Doug Lea with assistance from members of JCP JSR-166
+ Expert Group and released to the public domain, as explained at
+ http://creativecommons.org/publicdomain/zero/1.0/
+
+    ************************
+    Eclipse Public License 2.0
+    ************************
+
+    The following binary components are provided under the Eclipse Public License 2.0. See project link for details.
+
+ (EPL 2.0)(GPL 2)(LGPL 2.1) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/pom.xml
new file mode 100644
index 000000000000..a621c826e06a
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/pom.xml
@@ -0,0 +1,62 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-cql-services-api
+ jar
+
+
+
+ org.apache.nifi
+ nifi-api
+ provided
+
+
+
+
+ org.apache.cassandra
+ java-driver-query-builder
+ 4.19.0
+
+
+
+ org.apache.nifi
+ nifi-record
+
+
+
+ org.apache.cassandra
+ java-driver-core
+ 4.19.0
+ provided
+
+
+
+ org.apache.nifi
+ nifi-ssl-context-service-api
+ provided
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLExecutionService.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLExecutionService.java
new file mode 100644
index 000000000000..1f5def5e5437
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLExecutionService.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.cql.api;
+
+import com.datastax.oss.driver.api.core.ConsistencyLevel;
+import com.datastax.oss.driver.api.core.DefaultConsistencyLevel;
+import io.netty.handler.ssl.ClientAuth;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.controller.ControllerService;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.service.cql.api.exception.QueryFailureException;
+import org.apache.nifi.ssl.SSLContextService;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public interface CQLExecutionService extends ControllerService {
+ PropertyDescriptor CONTACT_POINTS = new PropertyDescriptor.Builder()
+ .name("Cassandra Contact Points")
+ .description("Contact points are addresses of Cassandra nodes. The list of contact points should be "
+ + "comma-separated and in hostname:port format. Example node1:port,node2:port,...."
+ + " The default client port for Cassandra is 9042, but the port(s) must be explicitly specified.")
+ .required(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.HOSTNAME_PORT_LIST_VALIDATOR)
+ .build();
+
+ PropertyDescriptor DATACENTER = new PropertyDescriptor.Builder()
+ .name("Cassandra Datacenter")
+ .description("The datacenter setting to use with your node/cluster.")
+ .required(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .build();
+
+ PropertyDescriptor KEYSPACE = new PropertyDescriptor.Builder()
+ .name("Default Keyspace")
+ .description("The Cassandra Keyspace to connect to. If no keyspace is specified, the query will need to " +
+ "include the keyspace name before any table reference, in case of 'query' native processors or " +
+ "if the processor supports the 'Table' property, the keyspace name has to be provided with the " +
+                    "table name in the form of <keyspace>.<table>.")
+ .required(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .build();
+
+ PropertyDescriptor PROP_SSL_CONTEXT_SERVICE = new PropertyDescriptor.Builder()
+ .name("SSL Context Service")
+ .description("The SSL Context Service used to provide client certificate information for TLS/SSL "
+ + "connections.")
+ .required(false)
+ .identifiesControllerService(SSLContextService.class)
+ .build();
+
+ PropertyDescriptor CLIENT_AUTH = new PropertyDescriptor.Builder()
+ .name("Client Auth")
+ .description("Client authentication policy when connecting to secure (TLS/SSL) cluster. "
+                    + "Possible values are NONE, OPTIONAL, REQUIRE. This property is only used when an SSL Context "
+ + "has been defined and enabled.")
+ .required(false)
+ .allowableValues(ClientAuth.values())
+ .defaultValue("REQUIRE")
+ .build();
+
+ PropertyDescriptor USERNAME = new PropertyDescriptor.Builder()
+ .name("Username")
+ .description("Username to access the Cassandra cluster")
+ .required(false)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .build();
+
+ PropertyDescriptor PASSWORD = new PropertyDescriptor.Builder()
+ .name("Password")
+ .description("Password to access the Cassandra cluster")
+ .required(false)
+ .sensitive(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .build();
+
+ PropertyDescriptor CONSISTENCY_LEVEL = new PropertyDescriptor.Builder()
+ .name("Consistency Level")
+ .description("The strategy for how many replicas must respond before results are returned.")
+ .required(true)
+ .allowableValues(Stream.of(DefaultConsistencyLevel.values()).map(ConsistencyLevel::name).collect(Collectors.toSet()))
+ .defaultValue(ConsistencyLevel.ONE.name())
+ .build();
+
+ PropertyDescriptor FETCH_SIZE = new PropertyDescriptor.Builder()
+ .name("Fetch size")
+ .description("The number of result rows to be fetched from the result set at a time. Zero is the default "
+ + "and means there is no limit.")
+ .defaultValue("0")
+ .required(true)
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.INTEGER_VALIDATOR)
+ .build();
+
+ PropertyDescriptor COMPRESSION_TYPE = new PropertyDescriptor.Builder()
+ .name("Compression Type")
+ .description("Enable compression at transport-level requests and responses")
+ .required(false)
+ .allowableValues(ConnectionCompression.class)
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .defaultValue(ConnectionCompression.NONE.getValue())
+ .build();
+
+ PropertyDescriptor READ_TIMEOUT = new PropertyDescriptor.Builder()
+            .name("Read Timeout")
+ .description("Read timeout. 0 means no timeout. If no value is set, the underlying default will be used.")
+ .required(false)
+ .defaultValue("30 sec")
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
+ .build();
+
+ PropertyDescriptor CONNECT_TIMEOUT = new PropertyDescriptor.Builder()
+ .name("Connect Timeout")
+ .description("Connection timeout. 0 means no timeout. If no value is set, the underlying default will be used.")
+ .required(false)
+ .defaultValue("30 sec")
+ .expressionLanguageSupported(ExpressionLanguageScope.ENVIRONMENT)
+ .addValidator(StandardValidators.TIME_PERIOD_VALIDATOR)
+ .build();
+
+    void query(String cql, boolean cacheStatement, List<Object> parameters, CQLQueryCallback callback) throws QueryFailureException;
+
+    void insert(String table, org.apache.nifi.serialization.record.Record record);
+
+    void insert(String table, List<org.apache.nifi.serialization.record.Record> records);
+
+    String getTransitUrl(String tableName);
+
+    void delete(String cassandraTable, org.apache.nifi.serialization.record.Record record, List<String> updateKeys);
+
+    void update(String cassandraTable, org.apache.nifi.serialization.record.Record record, List<String> updateKeys, UpdateMethod updateMethod);
+
+    void update(String cassandraTable, List<org.apache.nifi.serialization.record.Record> records, List<String> updateKeys, UpdateMethod updateMethod);
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLFieldInfo.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLFieldInfo.java
new file mode 100644
index 000000000000..6a9b38c5b4f6
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLFieldInfo.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cql.api;
+
+public class CQLFieldInfo {
+ private String fieldName;
+ private String dataType;
+ private int dataTypeProtocolCode;
+
+ public CQLFieldInfo(String fieldName, String dataType, int dataTypeProtocolCode) {
+ this.fieldName = fieldName;
+ this.dataType = dataType;
+ this.dataTypeProtocolCode = dataTypeProtocolCode;
+ }
+
+ public String getFieldName() {
+ return fieldName;
+ }
+
+ public String getDataType() {
+ return dataType;
+ }
+
+ public int getDataTypeProtocolCode() {
+ return dataTypeProtocolCode;
+ }
+}
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLQueryCallback.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLQueryCallback.java
new file mode 100644
index 000000000000..de8cec88ba6a
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/CQLQueryCallback.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cql.api;
+
+import org.apache.nifi.serialization.record.Record;
+
+import java.util.List;
+
+public interface CQLQueryCallback {
+ void receive(long rowNumber,
+            Record result, List<CQLFieldInfo> fieldInfo, boolean isExhausted);
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/ConnectionCompression.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/ConnectionCompression.java
new file mode 100644
index 000000000000..7e0b298232e6
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/ConnectionCompression.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cql.api;
+
+import org.apache.nifi.components.DescribedValue;
+
+public enum ConnectionCompression implements DescribedValue {
+ NONE("none", "None", "None"),
+ LZ4("lz4", "LZ4", "LZ4"),
+ SNAPPY("snappy", "Snappy", "Snappy");
+
+ private String value;
+ private String displayName;
+ private String description;
+
+ ConnectionCompression(String value, String displayName, String description) {
+ this.value = value;
+ this.displayName = displayName;
+ this.description = description;
+ }
+
+ @Override
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String getDisplayName() {
+ return displayName;
+ }
+
+ @Override
+ public String getDescription() {
+ return description;
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/UpdateMethod.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/UpdateMethod.java
new file mode 100644
index 000000000000..548a52304e56
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/UpdateMethod.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cql.api;
+
+public enum UpdateMethod {
+ DECREMENT,
+ INCREMENT,
+ SET
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/exception/QueryFailureException.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/exception/QueryFailureException.java
new file mode 100644
index 000000000000..c3f463f9726c
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-cql-services-api/src/main/java/org/apache/nifi/service/cql/api/exception/QueryFailureException.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.cql.api.exception;
+
+public class QueryFailureException extends RuntimeException {
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/pom.xml
new file mode 100644
index 000000000000..44a5d0647a7f
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/pom.xml
@@ -0,0 +1,51 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-scylladb-session-provider-service-nar
+ nar
+
+
+
+
+
+ com.google.guava
+ guava
+ provided
+
+
+
+
+
+
+ org.apache.nifi
+ nifi-cql-services-api-nar
+ 2.5.0-SNAPSHOT
+ nar
+
+
+ org.apache.nifi
+ nifi-scylladb-session-provider-service
+ 2.5.0-SNAPSHOT
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/LICENSE b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 000000000000..c567ce25dfd7
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,352 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses.
+
+This product bundles 'libffi' which is available under an MIT style license.
+ libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others.
+ see https://github.com/java-native-access/jna/blob/master/native/libffi/LICENSE
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+This product bundles 'asm' which is available under a 3-Clause BSD style license.
+For details see http://asm.ow2.org/asmdex-license.html
+
+ Copyright (c) 2012 France Télécom
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ THE POSSIBILITY OF SUCH DAMAGE.
+
+ The binary distribution of this product bundles 'Bouncy Castle JDK 1.5'
+ under an MIT style license.
+
+ Copyright (c) 2000 - 2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org)
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+The binary distribution of this product bundles 'JNR x86asm' under an MIT
+style license.
+
+ Copyright (C) 2010 Wayne Meissner
+ Copyright (c) 2008-2009, Petr Kobalicek
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+This product bundles 'logback' which is dual-licensed under the EPL v1.0
+and the LGPL 2.1.
+
+ Logback: the reliable, generic, fast and flexible logging framework.
+
+ Copyright (C) 1999-2017, QOS.ch. All rights reserved.
+
+ This program and the accompanying materials are dual-licensed under
+ either the terms of the Eclipse Public License v1.0 as published by
+ the Eclipse Foundation or (per the licensee's choosing) under the
+ terms of the GNU Lesser General Public License version 2.1 as
+ published by the Free Software Foundation.
+
+The binary distribution of this product bundles 'ANTLR 3' which is available
+under a "3-clause BSD" license. For details see http://www.antlr.org/license.html
+
+ Copyright (c) 2012 Terence Parr and Sam Harwell
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without modification, are permitted
+ provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this list of
+ conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright notice, this list of
+ conditions and the following disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ Neither the name of the author nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/NOTICE b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/NOTICE
new file mode 100644
index 000000000000..f2261add8685
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service-nar/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,292 @@
+nifi-scylladb-session-provider-service-nar
+Copyright 2016-2025 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+******************
+Apache Software License v2
+******************
+
+The following binary components are provided under the Apache Software License v2
+
+ (ASLv2) DataStax Java Driver for Apache Cassandra - Core
+ The following NOTICE information applies:
+ DataStax Java Driver for Apache Cassandra - Core
+ Copyright (C) 2012-2017 DataStax Inc.
+
+ (ASLv2) Jackson JSON processor
+ The following NOTICE information applies:
+ # Jackson JSON processor
+
+ Jackson is a high-performance, Free/Open Source JSON processing library.
+ It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+ been in development since 2007.
+ It is currently developed by a community of developers, as well as supported
+ commercially by FasterXML.com.
+
+ ## Licensing
+
+ Jackson core and extension components may licensed under different licenses.
+ To find the details that apply to this artifact see the accompanying LICENSE file.
+ For more information, including possible other licensing options, contact
+ FasterXML.com (http://fasterxml.com).
+
+ ## Credits
+
+ A list of contributors may be found from CREDITS file, which is included
+ in some artifacts (usually source distributions); but is always available
+ from the source code management (SCM) system project uses.
+
+ (ASLv2) Apache Commons Codec
+ The following NOTICE information applies:
+ Apache Commons Codec
+ Copyright 2002-2014 The Apache Software Foundation
+
+ src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
+ contains test data from http://aspell.net/test/orig/batch0.tab.
+ Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+
+ ===============================================================================
+
+ The content of package org.apache.commons.codec.language.bm has been translated
+ from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+ with permission from the original authors.
+ Original source copyright:
+ Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
+
+ (ASLv2) Apache Commons Lang
+ The following NOTICE information applies:
+ Apache Commons Lang
+ Copyright 2001-2017 The Apache Software Foundation
+
+ This product includes software from the Spring Framework,
+ under the Apache License 2.0 (see: StringUtils.containsWhitespace())
+
+ (ASLv2) Guava
+ The following NOTICE information applies:
+ Guava
+ Copyright 2015 The Guava Authors
+
+ (ASLv2) JSON-SMART
+ The following NOTICE information applies:
+ Copyright 2011 JSON-SMART authors
+
+ (ASLv2) Dropwizard Metrics
+ The following NOTICE information applies:
+ Copyright (c) 2010-2013 Coda Hale, Yammer.com
+
+ This product includes software developed by Coda Hale and Yammer, Inc.
+
+ This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64,
+ LongAdder), which was released with the following comments:
+
+ Written by Doug Lea with assistance from members of JCP JSR-166
+ Expert Group and released to the public domain, as explained at
+ http://creativecommons.org/publicdomain/zero/1.0/
+
+ (ASLv2) The Netty Project
+ The following NOTICE information applies:
+ Copyright 2014 The Netty Project
+ -------------------------------------------------------------------------------
+ This product contains the extensions to Java Collections Framework which has
+ been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+ * LICENSE:
+ * license/LICENSE.jsr166y.txt (Public Domain)
+ * HOMEPAGE:
+ * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+ * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+ This product contains a modified version of Robert Harder's Public Domain
+ Base64 Encoder and Decoder, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.base64.txt (Public Domain)
+ * HOMEPAGE:
+ * http://iharder.sourceforge.net/current/java/base64/
+
+ This product contains a modified portion of 'Webbit', an event based
+ WebSocket and HTTP server, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.webbit.txt (BSD License)
+ * HOMEPAGE:
+ * https://github.com/joewalnes/webbit
+
+ This product contains a modified portion of 'SLF4J', a simple logging
+ facade for Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.slf4j.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.slf4j.org/
+
+ This product contains a modified portion of 'Apache Harmony', an open source
+ Java SE, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.harmony.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://archive.apache.org/dist/harmony/
+
+ This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+ and decompression library written by Matthew J. Francis. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jbzip2.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jbzip2/
+
+ This product contains a modified portion of 'libdivsufsort', a C API library to construct
+ the suffix array and the Burrows-Wheeler transformed string for any input string of
+ a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.libdivsufsort.txt (MIT License)
+ * HOMEPAGE:
+ * https://github.com/y-256/libdivsufsort
+
+ This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jctools.txt (ASL2 License)
+ * HOMEPAGE:
+ * https://github.com/JCTools/JCTools
+
+ This product optionally depends on 'JZlib', a re-implementation of zlib in
+ pure Java, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jzlib.txt (BSD style License)
+ * HOMEPAGE:
+ * http://www.jcraft.com/jzlib/
+
+ This product optionally depends on 'Compress-LZF', a Java library for encoding and
+ decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/ning/compress
+
+ This product optionally depends on 'lz4', a LZ4 Java compression
+ and decompression library written by Adrien Grand. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lz4.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jpountz/lz4-java
+
+ This product optionally depends on 'lzma-java', a LZMA Java compression
+ and decompression library, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.lzma-java.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/jponge/lzma-java
+
+ This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+ and decompression library written by William Kinney. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jfastlz.txt (MIT License)
+ * HOMEPAGE:
+ * https://code.google.com/p/jfastlz/
+
+ This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+ interchange format, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.protobuf.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/protobuf
+
+ This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+ a temporary self-signed X.509 certificate when the JVM does not provide the
+ equivalent functionality. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.bouncycastle.txt (MIT License)
+ * HOMEPAGE:
+ * http://www.bouncycastle.org/
+
+ This product optionally depends on 'Snappy', a compression library produced
+ by Google Inc, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.snappy.txt (New BSD License)
+ * HOMEPAGE:
+ * https://github.com/google/snappy
+
+ This product optionally depends on 'JBoss Marshalling', an alternative Java
+ serialization API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
+ * HOMEPAGE:
+ * http://www.jboss.org/jbossmarshalling
+
+ This product optionally depends on 'Caliper', Google's micro-
+ benchmarking framework, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.caliper.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/google/caliper
+
+ This product optionally depends on 'Apache Log4J', a logging framework, which
+ can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.log4j.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://logging.apache.org/log4j/
+
+ This product optionally depends on 'Aalto XML', an ultra-high performance
+ non-blocking XML processor, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * http://wiki.fasterxml.com/AaltoHome
+
+ This product contains a modified version of 'HPACK', a Java implementation of
+ the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.hpack.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/twitter/hpack
+
+ This product contains a modified portion of 'Apache Commons Lang', a Java library
+ provides utilities for the java.lang API, which can be obtained at:
+
+ * LICENSE:
+ * license/LICENSE.commons-lang.txt (Apache License 2.0)
+ * HOMEPAGE:
+ * https://commons.apache.org/proper/commons-lang/
+
+ This product contains a forked and modified version of Tomcat Native
+
+ * LICENSE:
+ * ASL2
+ * HOMEPAGE:
+ * http://tomcat.apache.org/native-doc/
+ * https://svn.apache.org/repos/asf/tomcat/native/
+
+ (ASLv2) Objenesis
+ The following NOTICE information applies:
+ Objenesis
+ Copyright 2006-2013 Joe Walnes, Henri Tremblay, Leonardo Mesquita
+
+************************
+Eclipse Public License 1.0
+************************
+
+The following binary components are provided under the Eclipse Public License (1.0 or 2.0). See project link for details.
+
+ (EPL 2.0)(GPL 2)(LGPL 2.1) JNR Posix ( jnr.posix ) https://github.com/jnr/jnr-posix/blob/master/LICENSE.txt
+ (EPL 1.0)(LGPL 2.1) Logback Classic (ch.qos.logback:logback-classic:jar:1.2.6 - http://logback.qos.ch/)
+ (EPL 1.0)(LGPL 2.1) Logback Core (ch.qos.logback:logback-core:jar:1.2.6 - http://logback.qos.ch/)
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/pom.xml
new file mode 100644
index 000000000000..4f84f1c53337
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/pom.xml
@@ -0,0 +1,103 @@
+
+
+
+
+ nifi-cql-bundle
+ org.apache.nifi
+ 2.5.0-SNAPSHOT
+
+ 4.0.0
+
+ nifi-scylladb-session-provider-service
+ jar
+
+
+
+ org.apache.nifi
+ nifi-api
+
+
+ org.apache.nifi
+ nifi-utils
+
+
+ org.apache.nifi
+ nifi-cql-services-api
+ 2.5.0-SNAPSHOT
+ provided
+
+
+
+ org.apache.nifi
+ nifi-cassandra-session-provider-service
+ 2.5.0-SNAPSHOT
+
+
+ org.apache.cassandra
+ java-driver-core
+
+
+
+
+
+ com.scylladb
+ java-driver-core
+ 4.17.0.0
+
+
+
+ org.apache.nifi
+ nifi-ssl-context-service-api
+
+
+ org.apache.nifi
+ nifi-framework-api
+
+
+ org.apache.nifi
+ nifi-mock
+
+
+
+ org.testcontainers
+ scylladb
+ 1.20.5
+
+
+
+ org.testcontainers
+ junit-jupiter
+
+
+
+ org.apache.nifi
+ nifi-record
+ compile
+
+
+ org.apache.nifi
+ nifi-avro-record-utils
+ 2.5.0-SNAPSHOT
+ compile
+
+
+ org.apache.nifi
+ nifi-security-cert-builder
+ 2.5.0-SNAPSHOT
+ test
+
+
+
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionService.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionService.java
new file mode 100644
index 000000000000..eb627e73140e
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionService.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.nifi.service.scylladb;
+
+import org.apache.nifi.service.cassandra.CassandraCQLExecutionService;
+
+public class ScyllaDBCQLExecutionService extends CassandraCQLExecutionService {
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
new file mode 100644
index 000000000000..7d6b91b8c1c0
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.nifi.service.scylladb.ScyllaDBCQLExecutionService
\ No newline at end of file
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/MockCassandraProcessor.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/MockCassandraProcessor.java
new file mode 100644
index 000000000000..62d481ebf0aa
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/MockCassandraProcessor.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.scylladb;
+
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.processor.AbstractProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.exception.ProcessException;
+import org.apache.nifi.processor.util.StandardValidators;
+
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Mock Cassandra processor for testing CassandraSessionProvider
+ */
+public class MockCassandraProcessor extends AbstractProcessor {
+ private static PropertyDescriptor CASSANDRA_SESSION_PROVIDER = new PropertyDescriptor.Builder()
+ .name("Cassandra Session Provider")
+ .required(true)
+ .description("Controller Service to obtain a Cassandra connection session")
+ .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
+ .identifiesControllerService(ScyllaDBCQLExecutionService.class)
+ .build();
+
+ @Override
+ public List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+ return Collections.singletonList(CASSANDRA_SESSION_PROVIDER);
+ }
+
+ @Override
+ public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
+
+ }
+}
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionServiceIT.java b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionServiceIT.java
new file mode 100644
index 000000000000..9d04fe1ad8c0
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/java/org/apache/nifi/service/scylladb/ScyllaDBCQLExecutionServiceIT.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.service.scylladb;
+
+import com.datastax.oss.driver.api.core.CqlSession;
+import com.datastax.oss.driver.api.core.cql.ResultSet;
+import com.datastax.oss.driver.api.core.cql.Row;
+import org.apache.nifi.security.cert.builder.StandardCertificateBuilder;
+import org.apache.nifi.serialization.SimpleRecordSchema;
+import org.apache.nifi.serialization.record.MapRecord;
+import org.apache.nifi.serialization.record.RecordField;
+import org.apache.nifi.serialization.record.RecordFieldType;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.service.cql.api.CQLExecutionService;
+import org.apache.nifi.service.cql.api.CQLQueryCallback;
+import org.apache.nifi.service.cql.api.UpdateMethod;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import org.testcontainers.scylladb.ScyllaDBContainer;
+import org.testcontainers.utility.MountableFile;
+
+import javax.security.auth.x500.X500Principal;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.Key;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.cert.Certificate;
+import java.security.cert.X509Certificate;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
+@Testcontainers
+public class ScyllaDBCQLExecutionServiceIT {
+ public static final String CASSANDRA_IMAGE = "scylladb/scylla:6.2";
+
+ public static final String adminPassword = UUID.randomUUID().toString();
+
+ private static TestRunner runner;
+ private static ScyllaDBCQLExecutionService sessionProvider;
+
+ private static final Map<String, String> CONTAINER_ENVIRONMENT = new LinkedHashMap<>();
+
+ private static final Base64.Encoder ENCODER = Base64.getEncoder();
+
+ private static final X500Principal CERTIFICATE_ISSUER = new X500Principal("CN=localhost");
+
+ private static final Collection<String> DNS_NAMES = Collections.singleton("localhost");
+
+ private static final String CERTIFICATE_FORMAT = "-----BEGIN CERTIFICATE-----%n%s%n-----END CERTIFICATE-----";
+
+ private static final String KEY_FORMAT = "-----BEGIN PRIVATE KEY-----%n%s%n-----END PRIVATE KEY-----";
+
+ private static final String SSL_DIRECTORY = "/ssl";
+
+ private static final String CERTIFICATE_FILE = "public.crt";
+
+ private static final String CONTAINER_CERTIFICATE_PATH = String.format("%s/%s", SSL_DIRECTORY, CERTIFICATE_FILE);
+
+ private static final String KEY_FILE = "private.key";
+
+ private static final String CONTAINER_KEY_PATH = String.format("%s/%s", SSL_DIRECTORY, KEY_FILE);
+
+ private static String trustStoreFilePath;
+
+ public static ScyllaDBContainer container = new ScyllaDBContainer(CASSANDRA_IMAGE);
+ private static CqlSession session;
+
+ @BeforeAll
+ public static void setup() throws Exception {
+ setCertificatePrivateKey();
+
+ container.withEnv(CONTAINER_ENVIRONMENT);
+ container.withExposedPorts(9042);
+ container.start();
+
+ MockCassandraProcessor mockCassandraProcessor = new MockCassandraProcessor();
+ sessionProvider = new ScyllaDBCQLExecutionService();
+
+ final String contactPoint = container.getContainerIpAddress() + ":" + container.getMappedPort(9042);
+
+ runner = TestRunners.newTestRunner(mockCassandraProcessor);
+ runner.addControllerService("cassandra-session-provider", sessionProvider);
+ runner.setProperty(sessionProvider, CQLExecutionService.USERNAME, "admin");
+ runner.setProperty(sessionProvider, CQLExecutionService.PASSWORD, adminPassword);
+ runner.setProperty(sessionProvider, CQLExecutionService.CONTACT_POINTS, contactPoint);
+ runner.setProperty(sessionProvider, CQLExecutionService.DATACENTER, "datacenter1");
+ runner.setProperty(sessionProvider, CQLExecutionService.KEYSPACE, "testspace");
+
+ session = CqlSession
+ .builder()
+ .addContactPoint(container.getContactPoint())
+ .withLocalDatacenter("datacenter1")
+ .build();
+
+ session.execute("create keyspace testspace with replication = { 'class': 'SimpleStrategy', 'replication_factor': 1};\n");
+ session.execute("""
+ create table testspace.message
+ (
+ sender text,
+ receiver text,
+ message text,
+ when_sent timestamp,
+ primary key ( sender, receiver, when_sent )
+ );
+ """);
+ session.execute("insert into testspace.message (sender, receiver, message, when_sent) values ('test@dummytest.com', 'receiver@test.com', 'Hello, world!', dateof(now()));");
+
+ session.execute("""
+ create table testspace.query_test
+ (
+ column_a text,
+ column_b text,
+ when timestamp,
+ primary key ( (column_a), column_b)
+ );
+ """);
+
+ session.execute("""
+ create table testspace.counter_test
+ (
+ column_a text,
+ increment_field counter,
+ primary key ( column_a )
+ );
+ """);
+
+ session.execute("""
+ create table testspace.simple_set_test
+ (
+ username text,
+ is_active boolean,
+ primary key ( username )
+ );""");
+
+ Thread.sleep(250);
+ runner.enableControllerService(sessionProvider);
+ }
+
+ @AfterAll
+ public static void tearDown() throws Exception {
+ container.stop();
+ }
+
+ private RecordSchema getSchema() {
+ List<RecordField> fields = List.of(
+ new RecordField("sender", RecordFieldType.STRING.getDataType()),
+ new RecordField("receiver", RecordFieldType.STRING.getDataType()),
+ new RecordField("message", RecordFieldType.STRING.getDataType()),
+ new RecordField("when_sent", RecordFieldType.TIMESTAMP.getDataType())
+ );
+ return new SimpleRecordSchema(fields);
+ }
+
+ @Test
+ public void testInsertRecord() {
+ RecordSchema schema = getSchema();
+ Map<String, Object> rawRecord = new HashMap<>();
+ rawRecord.put("sender", "john.smith");
+ rawRecord.put("receiver", "jane.smith");
+ rawRecord.put("message", "hello");
+ rawRecord.put("when_sent", Instant.now());
+
+ MapRecord record = new MapRecord(schema, rawRecord);
+
+ assertDoesNotThrow(() -> sessionProvider.insert("message", record));
+ }
+
+ @Test
+ public void testIncrementAndDecrement() throws Exception {
+ RecordField field1 = new RecordField("column_a", RecordFieldType.STRING.getDataType());
+ RecordField field2 = new RecordField("increment_field", RecordFieldType.INT.getDataType());
+ RecordSchema schema = new SimpleRecordSchema(List.of(field1, field2));
+
+ HashMap<String, Object> map = new HashMap<>();
+ map.put("column_a", "abcdef");
+ map.put("increment_field", 1);
+
+ MapRecord record = new MapRecord(schema, map);
+
+ List<String> updateKeys = new ArrayList<>();
+ updateKeys.add("column_a");
+
+ //Set the initial value
+ sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.INCREMENT);
+
+ Thread.sleep(1000);
+
+ sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.INCREMENT);
+
+ ResultSet results = session.execute("select increment_field from testspace.counter_test where column_a = 'abcdef'");
+
+ Iterator<Row> rowIterator = results.iterator();
+
+ Row row = rowIterator.next();
+
+ assertEquals(2, row.getLong("increment_field"));
+
+ sessionProvider.update("counter_test", record, updateKeys, UpdateMethod.DECREMENT);
+
+ results = session.execute("select increment_field from testspace.counter_test where column_a = 'abcdef'");
+
+ rowIterator = results.iterator();
+
+ row = rowIterator.next();
+
+ assertEquals(1, row.getLong("increment_field"));
+ }
+
+ @Test
+ public void testUpdateSet() throws Exception {
+ session.execute("insert into testspace.simple_set_test(username, is_active) values('john.smith', true)");
+ Thread.sleep(250);
+
+ RecordField field1 = new RecordField("username", RecordFieldType.STRING.getDataType());
+ RecordField field2 = new RecordField("is_active", RecordFieldType.BOOLEAN.getDataType());
+ RecordSchema schema = new SimpleRecordSchema(List.of(field1, field2));
+
+ HashMap<String, Object> map = new HashMap<>();
+ map.put("username", "john.smith");
+ map.put("is_active", false);
+
+ MapRecord record = new MapRecord(schema, map);
+
+ List<String> updateKeys = new ArrayList<>();
+ updateKeys.add("username");
+
+ sessionProvider.update("simple_set_test", record, updateKeys, UpdateMethod.SET);
+
+ Iterator<Row> iterator = session.execute("select is_active from testspace.simple_set_test where username = 'john.smith'").iterator();
+
+ Row row = iterator.next();
+
+ assertFalse(row.getBoolean("is_active"));
+ }
+
+ @Test
+ public void testQueryRecord() {
+ String[] statements = """
+ insert into testspace.query_test (column_a, column_b, when)
+ values ('abc', 'def', dateof(now()));
+ insert into testspace.query_test (column_a, column_b, when)
+ values ('abc', 'ghi', dateof(now()));
+ insert into testspace.query_test (column_a, column_b, when)
+ values ('abc', 'jkl', dateof(now()));
+ """.trim().split("\\;");
+ for (String statement : statements) {
+ session.execute(statement);
+ }
+
+ List<Object> records = new ArrayList<>();
+ CQLQueryCallback callback = (rowNumber, result, fields, isExhausted) -> records.add(result);
+
+ sessionProvider.query("select * from testspace.query_test", false, null, callback);
+ }
+
+
+ private static void setCertificatePrivateKey() throws Exception {
+ final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA");
+ final KeyPair keyPair = keyPairGenerator.generateKeyPair();
+ final X509Certificate certificate = new StandardCertificateBuilder(keyPair, CERTIFICATE_ISSUER, Duration.ofDays(1))
+ .setDnsSubjectAlternativeNames(DNS_NAMES)
+ .build();
+
+ final Key key = keyPair.getPrivate();
+ final String keyEncoded = getKeyEncoded(key);
+
+ Path keyPath = writeCertificateEncoded(keyEncoded, ".key");
+
+ final String certificateEncoded = getCertificateEncoded(certificate);
+ final Path certificateFilePath = writeCertificateEncoded(certificateEncoded, ".crt");
+ trustStoreFilePath = certificateFilePath.toString();
+
+ container = container
+ .withSsl(
+ MountableFile.forHostPath(certificateFilePath.toString()),
+ MountableFile.forHostPath(keyPath.toString()),
+ MountableFile.forHostPath(certificateFilePath.toString())
+ );
+ }
+
+ private static String getCertificateEncoded(final Certificate certificate) throws Exception {
+ final byte[] certificateEncoded = certificate.getEncoded();
+ final String encoded = ENCODER.encodeToString(certificateEncoded);
+ return String.format(CERTIFICATE_FORMAT, encoded);
+ }
+
+ private static String getKeyEncoded(final Key key) {
+ final byte[] keyEncoded = key.getEncoded();
+ final String encoded = ENCODER.encodeToString(keyEncoded);
+ return String.format(KEY_FORMAT, encoded);
+ }
+
+ private static Path writeCertificateEncoded(final String certificateEncoded, String extension) throws IOException {
+ final Path certificateFile = Files.createTempFile(ScyllaDBCQLExecutionServiceIT.class.getSimpleName(), extension);
+ Files.write(certificateFile, certificateEncoded.getBytes(StandardCharsets.UTF_8));
+ certificateFile.toFile().deleteOnExit();
+ return certificateFile;
+ }
+}
+
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla-test-ssl.yaml b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla-test-ssl.yaml
new file mode 100644
index 000000000000..c355308c0771
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla-test-ssl.yaml
@@ -0,0 +1,853 @@
+#Licensed to the Apache Software Foundation (ASF) under one or more
+#contributor license agreements. See the NOTICE file distributed with
+#this work for additional information regarding copyright ownership.
+#The ASF licenses this file to You under the Apache License, Version 2.0
+#(the "License"); you may not use this file except in compliance with
+#the License. You may obtain a copy of the License at
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+
+# Scylla storage config YAML
+
+#######################################
+# This file is split to two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# seed_provider class_name is saved for future use.
+# A seed address is mandatory.
+seed_provider:
+ # The addresses of hosts that will serve as contact points for the joining node.
+ # It allows the node to discover the cluster ring topology on startup (when
+ # joining the cluster).
+ # Once the node has joined the cluster, the seed list has no function.
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # In a new cluster, provide the address of the first node.
+ # In an existing cluster, specify the address of at least one existing node.
+ # If you specify addresses of more than one node, use a comma to separate them.
+ # For example: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
+listen_address: localhost
+
+# Address to broadcast to other Scylla nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# When using multiple physical network interfaces, set this to true to listen on broadcast_address
+# in addition to the listen_address, allowing nodes to communicate in both interfaces.
+# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2.
+#
+# listen_on_broadcast_address: false
+
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# To disable the CQL native transport, remove this option and configure native_transport_port_ssl.
+native_transport_port: 9042
+
+# Like native_transport_port, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+native_shard_aware_transport_port: 19042
+
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+#native_transport_port_ssl: 9142
+
+# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+#native_shard_aware_transport_port_ssl: 19142
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# how long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Scylla enough about your network topology to route
+# requests efficiently
+# - it allows Scylla to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Scylla will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Scylla provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+# - GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Scylla will switch to the private IP after
+# establishing a connection.)
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# The address or interface to bind the native transport server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: localhost
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for REST API server
+api_port: 10000
+
+# IP for the REST API server
+api_address: 127.0.0.1
+
+# Log WARN on any batch size exceeding this value. 128 kiB per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 128
+
+# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 1024
+
+ # Authentication backend, identifying users
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+ # PasswordAuthenticator}.
+ #
+ # - AllowAllAuthenticator performs no checks - set it to disable authentication.
+ # - PasswordAuthenticator relies on username/password pairs to authenticate
+ # users. It keeps usernames and hashed passwords in system_auth.credentials table.
+ # Please increase system_auth keyspace replication factor if you use this authenticator.
+ # - com.scylladb.auth.TransitionalAuthenticator requires username/password pair
+ # to authenticate in the same manner as PasswordAuthenticator, but improper credentials
+ # result in being logged in as an anonymous user. Use for upgrading clusters' auth.
+ # - com.scylladb.auth.SaslauthdAuthenticator outsources authentication to a running saslauthd
+ # daemon. When using this authenticator, you must set the saslauthd_socket_path property to the
+ # Unix domain socket on which saslauthd is listening.
+ # authenticator: AllowAllAuthenticator
+ # saslauthd_socket_path: /var/state/saslauthd/mux
+
+ # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+ # Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+ # CassandraAuthorizer}.
+ #
+ # - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+ # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+ # increase system_auth keyspace replication factor if you use this authorizer.
+ # - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+ # authorizing permission management. Otherwise, it allows all. Use for upgrading
+ # clusters' auth.
+ # authorizer: AllowAllAuthorizer
+
+ # initial_token allows you to specify tokens manually. While you can use # it with
+ # vnodes (num_tokens > 1, above) -- in which case you should provide a
+ # comma-separated list -- it's primarily used when adding nodes # to legacy clusters
+ # that do not have vnodes enabled.
+ # initial_token:
+
+ # RPC address to broadcast to drivers and other Scylla nodes. This cannot
+ # be set to 0.0.0.0. If left blank, this will be set to the value of
+ # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+ # be set.
+ # broadcast_rpc_address: 1.2.3.4
+
+ # Uncomment to enable experimental features
+ # experimental_features:
+ # - udf
+ # - alternator-streams
+ # - broadcast-tables
+ # - keyspace-storage-options
+
+ # The directory where hints files are stored if hinted handoff is enabled.
+ # hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner,
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Scylla will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+#
+# A value of -1 (default) will automatically equate it to the total amount of memory
+# available for Scylla.
+commitlog_total_space_in_mb: -1
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# ssl_storage_port: 7001
+
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+# start_native_transport: true
+
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# enable or disable keepalive on rpc/native connections
+# rpc_keepalive: true
+
+# Set to true to have Scylla create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+# incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Scylla won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+# snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+# auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+# tombstone_warn_threshold: 1000
+# tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+# 1) a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# 2) but, Scylla will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+# column_index_size_in_kb: 64
+
+# Auto-scaling of the promoted index prevents running out of memory
+# when the promoted index grows too large (due to partitions with many rows
+# vs. too small column_index_size_in_kb). When the serialized representation
+# of the promoted index grows by this threshold, the desired block size
+# for this partition (initialized to column_index_size_in_kb)
+# is doubled, to decrease the sampling resolution by half.
+#
+# To disable promoted index auto-scaling, set the threshold to 0.
+# column_index_auto_scale_threshold_in_kb: 10240
+
+# Log a warning when writing partitions larger than this value
+# compaction_large_partition_warning_threshold_mb: 1000
+
+# Log a warning when writing rows larger than this value
+# compaction_large_row_warning_threshold_mb: 10
+
+# Log a warning when writing cells larger than this value
+# compaction_large_cell_warning_threshold_mb: 1
+
+# Log a warning when row number is larger than this value
+# compaction_rows_count_warning_threshold: 100000
+
+# Log a warning when writing a collection containing more elements than this value
+# compaction_collection_elements_count_warning_threshold: 10000
+
+# How long the coordinator should wait for seq or index scans to complete
+# range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+# counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+# cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+# truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+# request_timeout_in_ms: 10000
+
+# Enable or disable inter-node encryption.
+# You must also generate keys and provide the appropriate key and trust store locations and passwords.
+#
+# The available internode options are : all, none, dc, rack
+# If set to dc scylla will encrypt the traffic between the DCs
+# If set to rack scylla will encrypt the traffic between the racks
+#
+# SSL/TLS algorithm and ciphers used can be controlled by
+# the priority_string parameter. Info on priority string
+# syntax and values is available at:
+# https://gnutls.org/manual/html_node/Priority-Strings.html
+#
+# The require_client_auth parameter allows you to
+# restrict access to service based on certificate
+# validation. Client must provide a certificate
+# accepted by the used trust store to connect.
+#
+server_encryption_options:
+ internode_encryption: none
+ certificate: conf/scylla.crt
+ keyfile: conf/scylla.key
+ truststore:
+# certficate_revocation_list:
+ require_client_auth: False
+# priority_string:
+
+# enable or disable client/server encryption.
+# client_encryption_options:
+# enabled: false
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+# enable_session_tickets:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enables inter-node traffic compression metrics (`scylla_rpc_compression_...`)
+# and enables a new implementation of inter-node traffic compressors,
+# capable of using zstd (in addition to the default lz4)
+# and shared dictionaries.
+# (Those features must still be enabled by other settings).
+# Has minor CPU cost.
+#
+# internode_compression_enable_advanced: false
+
+# Enables training of shared compression dictionaries on inter-node traffic.
+# New dictionaries are distributed throughout the cluster via Raft,
+# and used to improve the effectiveness of inter-node traffic compression
+# when `internode_compression_enable_advanced` is enabled.
+#
+# WARNING: this may leak unencrypted data to disk. The trained dictionaries
+# contain randomly-selected pieces of data written to the cluster.
+# When the Raft log is unencrypted, those pieces of data will be
+# written to disk unencrypted. At the moment of writing, there is no
+# way to encrypt the Raft log.
+# This problem is tracked by https://github.com/scylladb/scylla-enterprise/issues/4717.
+#
+# Can be: never - Dictionaries aren't trained by this node.
+# when_leader - New dictionaries are trained by this node only if
+# it's the current Raft leader.
+# always - Dictionaries are trained by this node unconditionally.
+#
+# For efficiency reasons, training shouldn't be enabled on more than one node.
+# To enable it on a single node, one can let the cluster pick the trainer
+# by setting `when_leader` on all nodes, or specify one manually by setting `always`
+# on one node and `never` on others.
+#
+# rpc_dict_training_when: never
+
+# A number in range [0.0, 1.0] specifying the share of CPU which can be spent
+# by this node on compressing inter-node traffic with zstd.
+#
+# Depending on the workload, enabling zstd might have a drastic negative
+# effect on performance, so it shouldn't be done lightly.
+#
+# internode_compression_zstd_max_cpu_fraction: 0.0
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# audit settings
+# By default, Scylla does not audit anything.
+# 'audit' config option controls if and where to output audited events:
+# - "none": auditing is disabled (default)
+# - "table": save audited events in audit.audit_log column family
+# - "syslog": send audited events via syslog (depends on OS, but usually to /dev/log)
+# audit: "none"
+#
+# List of statement categories that should be audited.
+# audit_categories: "DCL,DDL,AUTH"
+#
+# List of tables that should be audited.
+# audit_tables: "<keyspace>.<table>,<keyspace>.<table>"
+#
+# List of keyspaces that should be fully audited.
+# All tables in those keyspaces will be audited
+# audit_keyspaces: "<keyspace>,<keyspace>"
+#
+# Overrides the Unix socket path used to connect to syslog. If left unset, it'll
+# use the default on the build system, which is usually "/dev/log"
+# audit_unix_socket_path: "/dev/log"
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Use on a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall-back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task started internally is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# Time for which task manager task started by user is kept in memory after it completes.
+# user_task_ttl_in_seconds: 3600
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla-test-ssl.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+#   where <workdir> is a path defined by the workdir configuration option,
+# * <path>: the node will open the maintenance socket on the path <path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+#
+# System information encryption settings
+#
+# If enabled, system tables that may contain sensitive information (system.batchlog,
+# system.paxos), hints files and commit logs are encrypted with the
+# encryption settings below.
+#
+# When enabling system table encryption on a node with existing data, run
+# `nodetool upgradesstables -a` on the listed tables to encrypt existing data.
+#
+# When tracing is enabled, sensitive info will be written into the tables in the
+# system_traces keyspace. Those tables should be configured to encrypt their data
+# on disk.
+#
+# It is recommended to use remote encryption keys from a KMIP server/KMS when using
+# Transparent Data Encryption (TDE) features.
+# Local key support is provided when a KMIP server/KMS is not available.
+#
+# See the scylla documentation for more info on available key providers and
+# their properties.
+#
+# system_info_encryption:
+# enabled: true
+# cipher_algorithm: AES
+# secret_key_strength: 128
+# key_provider: LocalFileSystemKeyProviderFactory
+# secret_key_file:
+#
+# system_info_encryption:
+# enabled: true
+# cipher_algorithm: AES
+# secret_key_strength: 128
+# key_provider: KmipKeyProviderFactory
+# kmip_host:
+# template_name: (optional)
+# key_namespace: (optional)
+#
+
+#
+# The directory where system keys are kept
+# This directory should have 700 permissions and belong to the scylla user
+#
+# system_key_directory: /etc/scylla/conf/resources/system_keys
+#
+
+#
+# KMIP host(s).
+#
+# The unique name of kmip host/cluster that can be referenced in table schema.
+#
+# host.yourdomain.com={ hosts=[<host[:port]>, ...], keyfile=/path/to/keyfile, truststore=/path/to/truststore.pem, key_cache_millis=<ms>, timeout=<ms> }:...
+#
+# The KMIP connection management only supports failover, so all requests will go through a
+# single KMIP server. There is no load balancing, as no KMIP servers (at the time of this writing)
+# support read replication, or other strategies for availability.
+#
+# Hosts are tried in the order they appear here. Add them in the same sequence they'll fail over in.
+#
+# KMIP requests will fail over/retry 'max_command_retries' times (default 3)
+#
+# kmip_hosts:
+#   <name>:
+#       hosts: [<host[:port]>, ...]
+#       certificate: (optional)
+#       keyfile: (optional)
+#       truststore: (optional)
+#       priority_string: (optional)
+#       username: (optional)
+#       password: (optional)
+#       max_command_retries: (optional; default 3)
+#       key_cache_expiry: (optional)
+#       key_cache_refresh: (optional)
+#   <name>:
+#       ...
+#
+
+#
+# KMS host(s).
+#
+# The unique name of kms host/account config that can be referenced in table schema.
+#
+# host.yourdomain.com={ endpoint=<http(s)://host(:port)>, aws_access_key_id=<id>, aws_secret_access_key=<key>, aws_region=<region>, master_key=<key id or alias>, keyfile=/path/to/keyfile, truststore=/path/to/truststore.pem, key_cache_millis=<ms>, timeout=<ms> }:...
+#
+# Actual connection can be either an explicit endpoint (<host>:<port>), or selected automatically via aws_region.
+#
+# Authentication can be explicit with aws_access_key_id and aws_secret_access_key. Either secret or both can be omitted
+# in which case the provider will try to read them from AWS credentials in ~/.aws/credentials. If aws_profile is set, the
+# credentials in this section is used.
+#
+# master_key is an AWS KMS key id or alias from which all keys used for actual encryption of scylla data will be derived.
+# This key must be pre-created with access policy allowing the above AWS id Encrypt, Decrypt and GenerateDataKey operations.
+#
+# kms_hosts:
+#   <name>:
+#       endpoint: http(s)://<host>(:<port>) (optional)
+#       aws_region: <region> (optional)
+#       aws_access_key_id: <id> (optional)
+#       aws_secret_access_key: <key> (optional)
+#       aws_profile: <profile> (optional)
+#       aws_use_ec2_credentials: (default false) If true, KMS queries will use the credentials provided by ec2 instance role metadata as initial access key.
+#       aws_use_ec2_region: (default false) If true, KMS queries will use the AWS region indicated by ec2 instance metadata
+#       aws_assume_role_arn: (optional) If set, any KMS query will first attempt to assume this role.
+#       master_key: <AWS KMS key id or alias> (required)
+#       certificate: (optional)
+#       keyfile: (optional)
+#       truststore: (optional)
+#       priority_string: (optional)
+#       key_cache_expiry: (optional)
+#       key_cache_refresh: (optional)
+#   <name>:
+#       ...
+#
+
+#
+# Server-global user information encryption settings
+#
+# If enabled, all user tables are encrypted with the
+# encryption settings below, unless the table has local scylla_encryption_options
+# specified.
+#
+# When enabling user table encryption on a node with existing data, run
+# `nodetool upgradesstables -a` on all user tables to encrypt existing data.
+#
+# It is recommended to use remote encryption keys from a KMIP server or KMS when using
+# Transparent Data Encryption (TDE) features.
+# Local key support is provided when a KMIP server/KMS is not available.
+#
+# See the scylla documentation for more info on available key providers and
+# their properties.
+#
+# user_info_encryption:
+# enabled: true
+# cipher_algorithm: AES
+# secret_key_strength: 128
+# key_provider: LocalFileSystemKeyProviderFactory
+# secret_key_file:
+#
+# user_info_encryption:
+# enabled: true
+# cipher_algorithm: AES
+# secret_key_strength: 128
+# key_provider: KmipKeyProviderFactory
+# kmip_host:
+# template_name: (optional)
+# key_namespace: (optional)
+#
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# Guardrail to enable the deprecated feature of CREATE TABLE WITH COMPACT STORAGE.
+# enable_create_table_with_compact_storage: false
+
+# Enable tablets for new keyspaces.
+# When enabled, newly created keyspaces will have tablets enabled by default.
+# That can be explicitly disabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': false}` replication option.
+#
+# Correspondingly, when disabled, newly created keyspaces will use vnodes
+# unless tablets are explicitly enabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': true}` replication option.
+#
+# Note that creating keyspaces with tablets enabled or disabled is irreversible.
+# The `tablets` option cannot be changed using `ALTER KEYSPACE`.
+enable_tablets: true
diff --git a/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla.yaml b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla.yaml
new file mode 100644
index 000000000000..d24b15cdb07d
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/nifi-scylladb-session-provider-service/src/test/resources/scylla.yaml
@@ -0,0 +1,636 @@
+#Licensed to the Apache Software Foundation (ASF) under one or more
+#contributor license agreements. See the NOTICE file distributed with
+#this work for additional information regarding copyright ownership.
+#The ASF licenses this file to You under the Apache License, Version 2.0
+#(the "License"); you may not use this file except in compliance with
+#the License. You may obtain a copy of the License at
+#http://www.apache.org/licenses/LICENSE-2.0
+#
+#Unless required by applicable law or agreed to in writing, software
+#distributed under the License is distributed on an "AS IS" BASIS,
+#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#See the License for the specific language governing permissions and
+#limitations under the License.
+
+
+# Scylla storage config YAML
+
+#######################################
+# This file is split to two sections:
+# 1. Supported parameters
+# 2. Unsupported parameters: reserved for future use or backwards
+# compatibility.
+# Scylla will only read and use the first segment
+#######################################
+
+### Supported Parameters
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+# It is recommended to change the default value when creating a new cluster.
+# You can NOT modify this value for an existing cluster
+#cluster_name: 'Test Cluster'
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+num_tokens: 256
+
+# Directory where Scylla should store all its files, which are commitlog,
+# data, hints, view_hints and saved_caches subdirectories. All of these
+# subs can be overridden by the respective options below.
+# If unset, the value defaults to /var/lib/scylla
+# workdir: /var/lib/scylla
+
+# Directory where Scylla should store data on disk.
+# data_file_directories:
+# - /var/lib/scylla/data
+
+# commit log. when running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# commitlog_directory: /var/lib/scylla/commitlog
+
+# schema commit log. A special commitlog instance
+# used for schema and system tables.
+# When running on magnetic HDD, this should be a
+# separate spindle than the data directories.
+# schema_commitlog_directory: /var/lib/scylla/commitlog/schema
+
+# commitlog_sync may be either "periodic" or "batch."
+#
+# When in batch mode, Scylla won't ack writes until the commit log
+# has been fsynced to disk. It will wait
+# commitlog_sync_batch_window_in_ms milliseconds between fsyncs.
+# This window should be kept short because the writer threads will
+# be unable to do extra work while waiting. (You may need to increase
+# concurrent_writes for the same reason.)
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 2
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# The size of the individual schema commitlog file segments.
+#
+# The default size is 128, which is 4 times larger than the default
+# size of the data commitlog. It's because the segment size puts
+# a limit on the mutation size that can be written at once, and some
+# schema mutation writes are much larger than average.
+schema_commitlog_segment_size_in_mb: 128
+
+# seed_provider class_name is saved for future use.
+# A seed address is mandatory.
+seed_provider:
+ # The addresses of hosts that will serve as contact points for the joining node.
+ # It allows the node to discover the cluster ring topology on startup (when
+ # joining the cluster).
+ # Once the node has joined the cluster, the seed list has no function.
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # In a new cluster, provide the address of the first node.
+ # In an existing cluster, specify the address of at least one existing node.
+ # If you specify addresses of more than one node, use a comma to separate them.
+ # For example: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# Address to bind to and tell other Scylla nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# If you leave broadcast_address (below) empty, then setting listen_address
+# to 0.0.0.0 is wrong as other nodes will not know how to reach this node.
+# If you set broadcast_address, then you can set listen_address to 0.0.0.0.
+listen_address: localhost
+
+# Address to broadcast to other Scylla nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# When using multiple physical network interfaces, set this to true to listen on broadcast_address
+# in addition to the listen_address, allowing nodes to communicate in both interfaces.
+# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2.
+#
+# listen_on_broadcast_address: false
+
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# To disable the CQL native transport, remove this option and configure native_transport_port_ssl.
+native_transport_port: 9042
+
+# Like native_transport_port, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+native_shard_aware_transport_port: 19042
+
+# Enabling native transport encryption in client_encryption_options allows you to either use
+# encryption for the standard port or to use a dedicated, additional port along with the unencrypted
+# standard native_transport_port.
+# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption
+# for native_transport_port. Setting native_transport_port_ssl to a different value
+# from native_transport_port will use encryption for native_transport_port_ssl while
+# keeping native_transport_port unencrypted.
+#native_transport_port_ssl: 9142
+
+# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the
+# client-side port numbers.
+#native_shard_aware_transport_port_ssl: 19142
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 5000
+
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 2000
+# how long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Scylla enough about your network topology to route
+# requests efficiently
+# - it allows Scylla to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Scylla will do its best not to have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Scylla provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This can improve cache
+# locality when disabling read repair. Only appropriate for
+# single-datacenter deployments.
+# - GossipingPropertyFileSnitch
+# This should be your go-to snitch for production use. The rack
+# and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via
+# gossip. If cassandra-topology.properties exists, it is used as a
+# fallback, allowing migration from the PropertyFileSnitch.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Scylla will switch to the private IP after
+# establishing a connection.)
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's IP
+# address, respectively. Unless this happens to match your
+# deployment conventions, this is best used as an example of
+# writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# The address or interface to bind the native transport server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address: localhost
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for REST API server
+api_port: 10000
+
+# IP for the REST API server
+api_address: 127.0.0.1
+
+# Log WARN on any batch size exceeding this value. 128 kiB per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 128
+
+# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 1024
+
+# Authentication backend, identifying users
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
+# PasswordAuthenticator}.
+#
+# - AllowAllAuthenticator performs no checks - set it to disable authentication.
+# - PasswordAuthenticator relies on username/password pairs to authenticate
+# users. It keeps usernames and hashed passwords in system_auth.credentials table.
+# Please increase system_auth keyspace replication factor if you use this authenticator.
+# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair
+# to authenticate in the same manner as PasswordAuthenticator, but improper credentials
+# result in being logged in as an anonymous user. Use for upgrading clusters' auth.
+# authenticator: AllowAllAuthenticator
+
+# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer,
+# CassandraAuthorizer}.
+#
+# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
+# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
+# increase system_auth keyspace replication factor if you use this authorizer.
+# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for
+# authorizing permission management. Otherwise, it allows all. Use for upgrading
+# clusters' auth.
+# authorizer: AllowAllAuthorizer
+
+# initial_token allows you to specify tokens manually. While you can use it with
+# vnodes (num_tokens > 1, above) -- in which case you should provide a
+# comma-separated list -- it's primarily used when adding nodes to legacy clusters
+# that do not have vnodes enabled.
+# initial_token:
+
+# RPC address to broadcast to drivers and other Scylla nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+# broadcast_rpc_address: 1.2.3.4
+
+# Uncomment to enable experimental features
+# experimental_features:
+# - udf
+# - alternator-streams
+# - broadcast-tables
+# - keyspace-storage-options
+
+# The directory where hints files are stored if hinted handoff is enabled.
+# hints_directory: /var/lib/scylla/hints
+
+# The directory where hints files are stored for materialized-view updates
+# view_hints_directory: /var/lib/scylla/view_hints
+
+# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff
+# May either be "true" or "false" to enable globally, or contain a list
+# of data centers to enable per-datacenter.
+# hinted_handoff_enabled: DC1,DC2
+# hinted_handoff_enabled: true
+
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+# max_hint_window_in_ms: 10800000 # 3 hours
+
+
+# Validity period for permissions cache (fetching permissions can be an
+# expensive operation depending on the authorizer, CassandraAuthorizer is
+# one example). Defaults to 10000, set to 0 to disable.
+# Will be disabled automatically for AllowAllAuthorizer.
+# permissions_validity_in_ms: 10000
+
+# Refresh interval for permissions cache (if enabled).
+# After this interval, cache entries become eligible for refresh. Upon next
+# access, an async reload is scheduled and the old value returned until it
+# completes. If permissions_validity_in_ms is non-zero, then this also must have
+# a non-zero value. Defaults to 2000. It's recommended to set this value to
+# be at least 3 times smaller than the permissions_validity_in_ms.
+# permissions_update_interval_in_ms: 2000
+
+# The partitioner is responsible for distributing groups of rows (by
+# partition key) across nodes in the cluster. You should leave this
+# alone for new clusters. The partitioner can NOT be changed without
+# reloading all data, so when upgrading you should set this to the
+# same partitioner you were already using.
+#
+# Murmur3Partitioner is currently the only supported partitioner.
+#
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# Total space to use for commitlogs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Scylla will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+#
+# A value of -1 (default) will automatically equate it to the total amount of memory
+# available for Scylla.
+commitlog_total_space_in_mb: -1
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+# ssl_storage_port: 7001
+
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+# start_native_transport: true
+
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# enable or disable keepalive on rpc/native connections
+# rpc_keepalive: true
+
+# Set to true to have Scylla create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+# incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Scylla won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+# snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+# auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+# tombstone_warn_threshold: 1000
+# tombstone_failure_threshold: 100000
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+# 1) a smaller granularity means more index entries are generated
+# and looking up rows within the partition by collation column
+# is faster
+# 2) but, Scylla will keep the collation index in memory for hot
+# rows (as part of the key cache), so a larger granularity means
+# you can cache more hot rows
+# column_index_size_in_kb: 64
+
+# Auto-scaling of the promoted index prevents running out of memory
+# when the promoted index grows too large (due to partitions with many rows
+# vs. too small column_index_size_in_kb). When the serialized representation
+# of the promoted index grows by this threshold, the desired block size
+# for this partition (initialized to column_index_size_in_kb)
+# is doubled, to decrease the sampling resolution by half.
+#
+# To disable promoted index auto-scaling, set the threshold to 0.
+# column_index_auto_scale_threshold_in_kb: 10240
+
+# Log a warning when writing partitions larger than this value
+# compaction_large_partition_warning_threshold_mb: 1000
+
+# Log a warning when writing rows larger than this value
+# compaction_large_row_warning_threshold_mb: 10
+
+# Log a warning when writing cells larger than this value
+# compaction_large_cell_warning_threshold_mb: 1
+
+# Log a warning when row number is larger than this value
+# compaction_rows_count_warning_threshold: 100000
+
+# Log a warning when writing a collection containing more elements than this value
+# compaction_collection_elements_count_warning_threshold: 10000
+
+# How long the coordinator should wait for seq or index scans to complete
+# range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+# counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+# cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+# truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+# request_timeout_in_ms: 10000
+
+# Enable or disable inter-node encryption.
+# You must also generate keys and provide the appropriate key and trust store locations and passwords.
+#
+# The available internode options are : all, none, dc, rack
+# If set to dc scylla will encrypt the traffic between the DCs
+# If set to rack scylla will encrypt the traffic between the racks
+#
+# SSL/TLS algorithm and ciphers used can be controlled by
+# the priority_string parameter. Info on priority string
+# syntax and values is available at:
+# https://gnutls.org/manual/html_node/Priority-Strings.html
+#
+# The require_client_auth parameter allows you to
+# restrict access to service based on certificate
+# validation. Client must provide a certificate
+# accepted by the used trust store to connect.
+#
+# server_encryption_options:
+# internode_encryption: none
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# enable or disable client/server encryption.
+# client_encryption_options:
+# enabled: false
+# certificate: conf/scylla.crt
+# keyfile: conf/scylla.key
+# truststore:
+# certficate_revocation_list:
+# require_client_auth: False
+# priority_string:
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+# internode_compression: none
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+# inter_dc_tcp_nodelay: false
+
+# Relaxation of environment checks.
+#
+# Scylla places certain requirements on its environment. If these requirements are
+# not met, performance and reliability can be degraded.
+#
+# These requirements include:
+# - A filesystem with good support for asynchronous I/O (AIO). Currently,
+# this means XFS.
+#
+# false: strict environment checks are in place; do not start if they are not met.
+# true: relaxed environment checks; performance and reliability may degrade.
+#
+# developer_mode: false
+
+
+# Idle-time background processing
+#
+# Scylla can perform certain jobs in the background while the system is otherwise idle,
+# freeing processor resources when there is other work to be done.
+#
+# defragment_memory_on_idle: true
+#
+# prometheus port
+# By default, Scylla opens prometheus API port on port 9180
+# setting the port to 0 will disable the prometheus API.
+# prometheus_port: 9180
+#
+# prometheus address
+# Leaving this blank will set it to the same value as listen_address.
+# This means that by default, Scylla listens to the prometheus API on the same
+# listening address (and therefore network interface) used to listen for
+# internal communication. If the monitoring node is not in this internal
+# network, you can override prometheus_address explicitly - e.g., setting
+# it to 0.0.0.0 to listen on all interfaces.
+# prometheus_address: 1.2.3.4
+
+# Distribution of data among cores (shards) within a node
+#
+# Scylla distributes data within a node among shards, using a round-robin
+# strategy:
+# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ...
+#
+# Scylla versions 1.6 and below used just one repetition of the pattern;
+# this interfered with data placement among nodes (vnodes).
+#
+# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this
+# provides for better data distribution.
+#
+# the value below is log (base 2) of the number of repetitions.
+#
+# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and
+# below.
+#
+# Keep at 12 for new clusters.
+murmur3_partitioner_ignore_msb_bits: 12
+
+# Use on a new, parallel algorithm for performing aggregate queries.
+# Set to `false` to fall-back to the old algorithm.
+# enable_parallelized_aggregation: true
+
+# Time for which task manager task is kept in memory after it completes.
+# task_ttl_in_seconds: 0
+
+# In materialized views, restrictions are allowed only on the view's primary key columns.
+# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
+# of the view's primary key. These invalid restrictions were ignored.
+# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions.
+#
+# Can be true, false, or warn.
+# * `true`: IS NOT NULL is allowed only on the view's primary key columns,
+# trying to use it on other columns will cause an error, as it should.
+# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored.
+# It's useful for backwards compatibility.
+# * `warn`: The same as false, but there's a warning about invalid view restrictions.
+#
+# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`.
+# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`)
+# to make sure that trying to create an invalid view causes an error.
+strict_is_not_null_in_views: true
+
+# The Unix Domain Socket the node uses for maintenance socket.
+# The possible options are:
+# * ignore: the node will not open the maintenance socket,
+# * workdir: the node will open the maintenance socket on the path <workdir>/cql.m,
+# where <workdir> is a path defined by the workdir configuration option,
+# * <socket path>: the node will open the maintenance socket on the path <socket path>.
+maintenance_socket: ignore
+
+# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL
+# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime
+# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating
+# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false
+# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers.
+# live_updatable_config_params_changeable_via_cql: true
+
+# ****************
+# * GUARDRAILS *
+# ****************
+
+# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold.
+# Please note that the value of 0 is always allowed,
+# which means that having no replication at all, i.e. RF = 0, is always valid.
+# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled.
+# Commenting out a guardrail also means it is disabled.
+# minimum_replication_factor_fail_threshold: -1
+# minimum_replication_factor_warn_threshold: 3
+# maximum_replication_factor_warn_threshold: -1
+# maximum_replication_factor_fail_threshold: -1
+
+# Guardrails to warn about or disallow creating a keyspace with specific replication strategy.
+# Each of these 2 settings is a list storing replication strategies considered harmful.
+# The replication strategies to choose from are:
+# 1) SimpleStrategy,
+# 2) NetworkTopologyStrategy,
+# 3) LocalStrategy,
+# 4) EverywhereStrategy
+#
+# replication_strategy_warn_list:
+# - SimpleStrategy
+# replication_strategy_fail_list:
+
+# Enables the tablets feature.
+# When enabled, newly created keyspaces will have tablets enabled by default.
+# That can be explicitly disabled in the CREATE KEYSPACE query
+# by using the `tablets = {'enabled': false}` replication option.
+#
+# When the tablets feature is disabled, there is no way to enable tablets
+# per keyspace.
+#
+# Note that creating keyspaces with tablets enabled is irreversible.
+# Disabling the tablets feature may impact existing keyspaces that were created with tablets.
+# For example, the tablets map would remain "frozen" and will not respond to topology changes
+# like adding, removing, or replacing nodes, or to replication factor changes.
+enable_tablets: true
+api_ui_dir: /opt/scylladb/swagger-ui/dist/
+api_doc_dir: /opt/scylladb/api/api-doc/
diff --git a/nifi-extension-bundles/nifi-cql-bundle/pom.xml b/nifi-extension-bundles/nifi-cql-bundle/pom.xml
new file mode 100644
index 000000000000..c03b534ae5bc
--- /dev/null
+++ b/nifi-extension-bundles/nifi-cql-bundle/pom.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <artifactId>nifi-standard-shared-bom</artifactId>
+        <groupId>org.apache.nifi</groupId>
+        <version>2.5.0-SNAPSHOT</version>
+        <relativePath>../nifi-standard-shared-bundle/nifi-standard-shared-bom</relativePath>
+    </parent>
+
+    <properties>
+        <cassandra.sdk.version>3.11.5</cassandra.sdk.version>
+        <cassandra.guava.version>19.0</cassandra.guava.version>
+    </properties>
+
+    <artifactId>nifi-cql-bundle</artifactId>
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>nifi-cql-processors</module>
+        <module>nifi-cql-nar</module>
+        <module>nifi-cql-services-api</module>
+        <module>nifi-cql-services-api-nar</module>
+        <module>nifi-cassandra-session-provider-service</module>
+        <module>nifi-cassandra-session-provider-service-nar</module>
+        <module>nifi-scylladb-session-provider-service</module>
+        <module>nifi-scylladb-session-provider-service-nar</module>
+    </modules>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.apache.nifi</groupId>
+                <artifactId>nifi-cql-processors</artifactId>
+                <version>2.5.0-SNAPSHOT</version>
+            </dependency>
+            <dependency>
+                <groupId>com.google.guava</groupId>
+                <artifactId>guava</artifactId>
+                <version>${cassandra.guava.version}</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+</project>
diff --git a/nifi-extension-bundles/pom.xml b/nifi-extension-bundles/pom.xml
index c6a536f7e16c..94c3eb88998e 100755
--- a/nifi-extension-bundles/pom.xml
+++ b/nifi-extension-bundles/pom.xml
@@ -95,5 +95,6 @@
nifi-github-bundle
nifi-gitlab-bundle
nifi-atlassian-bundle
+ nifi-cql-bundle