<a name="mrs_01_24036"></a>
<h1 class="topictitle1">Stream Write</h1>
<div id="body32001227"><p id="mrs_01_24036__en-us_topic_0000001173631458_p8060118">The HoodieDeltaStreamer tool provided by Hudi supports stream write. You can also use SparkStreaming to write data in microbatch mode. HoodieDeltaStreamer provides the following functions:</p>
<ul id="mrs_01_24036__en-us_topic_0000001173631458_ul11839154814112"><li id="mrs_01_24036__en-us_topic_0000001173631458_li11839194881119">Supports multiple data sources, such as Kafka and DFS.</li><li id="mrs_01_24036__en-us_topic_0000001173631458_li9839154891115">Manages checkpoints, rollback, and recovery to ensure exactly-once semantics.</li><li id="mrs_01_24036__en-us_topic_0000001173631458_li28395481110">Supports user-defined transformations.</li></ul>
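<p>The following is a minimal sketch, not one of the product samples, of writing to Hudi with Spark Structured Streaming in micro-batch mode. The table name, paths, and the rate test source are placeholder assumptions; a production job would read from Kafka and parse real records instead.</p>
<pre class="screen">import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.streaming.StreamingQuery;

public class SparkStreamingToHudi {
  public static void main(String[] args) throws Exception {
    SparkSession spark = SparkSession.builder().appName("stream-to-hudi").getOrCreate();

    // Test source that emits (timestamp, value) rows; replace with a Kafka
    // source and real parsing logic in production.
    Dataset<Row> source = spark.readStream()
        .format("rate").option("rowsPerSecond", "10").load()
        .selectExpr("CAST(value AS STRING) AS id",
                    "CAST(value % 100 AS STRING) AS age",
                    "CAST(timestamp AS STRING) AS name");

    // Each micro-batch is written to the Hudi table at the given base path.
    StreamingQuery query = source.writeStream()
        .format("org.apache.hudi")
        .option("hoodie.table.name", "hudi_streaming_table")           // placeholder name
        .option("hoodie.datasource.write.recordkey.field", "id")
        .option("hoodie.datasource.write.partitionpath.field", "age")
        .option("hoodie.datasource.write.precombine.field", "name")
        .option("checkpointLocation", "hdfs://hacluster/tmp/streaming_checkpoint")
        .outputMode("append")
        .start("hdfs://hacluster/tmp/hudi_streaming_table");
    query.awaitTermination();
  }
}</pre>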
<p id="mrs_01_24036__en-us_topic_0000001173631458_p20792185742216">Example:</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p63958215255">Prepare the configuration file <strong id="mrs_01_24036__en-us_topic_0000001173631458_b17521373144">kafka-source.properties</strong>.</p>
<pre class="screen" id="mrs_01_24036__en-us_topic_0000001173631458_screen610745911126">#Hudi configuration
|
|
hoodie.datasource.write.recordkey.field=id
|
|
hoodie.datasource.write.partitionpath.field=age
|
|
hoodie.upsert.shuffle.parallelism=100
|
|
#hive config
|
|
hoodie.datasource.hive_sync.table=hudimor_deltastreamer_partition
|
|
hoodie.datasource.hive_sync.partition_fields=age
|
|
hoodie.datasource.hive_sync.partition_extractor_class=org.apache.hudi.hive.MultiPartKeysValueExtractor
|
|
hoodie.datasource.hive_sync.use_jdbc=false
|
|
hoodie.datasource.hive_sync.support_timestamp=true
|
|
# Kafka Source topic
|
|
hoodie.deltastreamer.source.kafka.topic=hudimor_deltastreamer_partition
|
|
#checkpoint
|
|
hoodie.deltastreamer.checkpoint.provider.path=hdfs://hacluster/tmp/huditest/hudimor_deltastreamer_partition
|
|
# Kafka props
|
|
# The kafka cluster we want to ingest from
|
|
bootstrap.servers= xx.xx.xx.xx:xx
|
|
auto.offset.reset=earliest
|
|
#auto.offset.reset=latest
|
|
group.id=hoodie-delta-streamer
|
|
offset.rang.limit=10000</pre>
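<p>For reference, a record consumed from this topic by JsonKafkaSource might look as follows (hypothetical record; <strong>id</strong> is the record key and <strong>age</strong> is the partition path field configured above):</p>
<pre class="screen">{"id": 1, "name": "Alice", "age": 30}</pre>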
<p id="mrs_01_24036__en-us_topic_0000001173631458_p125405434265">Run the following commands to specify the HoodieDeltaStreamer execution parameters (for details about the parameter configuration, visit the official website <a href="https://hudi.apache.org/" target="_blank" rel="noopener noreferrer">https://hudi.apache.org/ </a>):</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p13711104718296"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b1167715101617">spark-submit --master yarn</strong></p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p10196105418327"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b31721315191618">--jars /opt/hudi-java-examples-1.0.jar</strong> // Specify the Hudi <strong id="mrs_01_24036__en-us_topic_0000001173631458_b879105164616">jars</strong> directory required for Spark running.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p10258112312255"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b2971122112161">--driver-memory 1g</strong></p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p16258162318259"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b109791221111616">--executor-memory 1g --executor-cores 1 --num-executors 2 --conf spark.kryoserializer.buffer.max=128m</strong></p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p1325822312258"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b398319214165">--driver-class-path /opt/client/Hudi/hudi/conf:/opt/client/Hudi/hudi/lib/*:/opt/client/Spark2x/spark/jars/*:/opt/hudi-examples-0.6.1-SNAPSHOT.jar:/opt/hudi-examples-0.6.1-SNAPSHOT-tests.jar</strong> // Specify the Hudi <strong id="mrs_01_24036__en-us_topic_0000001173631458_b1914874732017">jars</strong> directory required by the Spark driver.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p1358413138294"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b10914112861618">--class org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer spark-internal</strong></p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p14258723192519"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b1919102881610">--props file:///opt/kafka-source.properties</strong> // Specify the configuration file. You need to set the configuration file path to the HDFS path when submitting tasks in yarn-cluster mode.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p9735181842919"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b152717353173">--target-base-path /tmp/huditest/hudimor1_deltastreamer_partition</strong> // Specify the path of the Hudi table.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p1746352217297"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b10445143911170">--table-type MERGE_ON_READ</strong> // Specify the type of the Hudi table to be written.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p1625862382510"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b1012054361714">--target-table hudimor_deltastreamer_partition</strong> // Specify the Hudi table name.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p6129105816357"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b3909146161714">--source-ordering-field name</strong> // Specify the columns to be pre-combined in the Hudi table.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p15698145711286"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b4423145261711">--source-class org.apache.hudi.utilities.sources.JsonKafkaSource</strong> // Set the consumed data source to <strong id="mrs_01_24036__en-us_topic_0000001173631458_b4597131913517">JsonKafkaSource</strong>. Different source classes are specified based on different data sources.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p325832317251"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b1414083510182">--schemaprovider-class com.xxxx.bigdata.hudi.examples.DataSchemaProviderExample</strong> // Specify the schema required by the Hudi table.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p126844719286"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b4139139161814">--transformer-class com.xxx.bigdata.hudi.examples.TransformerExample</strong> // Specify how to process the data obtained from the data source. Set this parameter based on service requirements.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p964514504288"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b129210311288">--enable-hive-sync</strong> // Enable Hive synchronization to synchronize the Hudi table to Hive.</p>
<p id="mrs_01_24036__en-us_topic_0000001173631458_p172581323182511"><strong id="mrs_01_24036__en-us_topic_0000001173631458_b7255503187">--continuous</strong> // Set the stream processing mode to <strong id="mrs_01_24036__en-us_topic_0000001173631458_b187325464213">continuous</strong>.</p>
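<p>The class referenced by <strong>--transformer-class</strong> implements Hudi's <strong>org.apache.hudi.utilities.transform.Transformer</strong> interface. The following is a minimal sketch of such a class; the package name matches the placeholder above, the filter is a stand-in for real service logic, and the <strong>TypedProperties</strong> package may differ across Hudi versions:</p>
<pre class="screen">package com.xxx.bigdata.hudi.examples; // hypothetical package, as used above

import org.apache.hudi.common.config.TypedProperties; // org.apache.hudi.common.util in older versions
import org.apache.hudi.utilities.transform.Transformer;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class TransformerExample implements Transformer {
  @Override
  public Dataset<Row> apply(JavaSparkContext jsc, SparkSession sparkSession,
      Dataset<Row> rowDataset, TypedProperties properties) {
    // Stand-in transformation: drop records with an invalid age.
    return rowDataset.filter("age IS NOT NULL AND age >= 0");
  }
}</pre>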
<div class="section" id="mrs_01_24036__section12455155731216"><h4 class="sectiontitle">Stream Write Using HoodieMultiTableDeltaStreamer</h4><div class="note" id="mrs_01_24036__note836473124417"><img src="public_sys-resources/note_3.0-en-us.png"><span class="notetitle"> </span><div class="notebody"><p id="mrs_01_24036__p11364163144419">HoodieMultiTableDeltaStreamer streaming write applies only to MRS 3.2.0 or later.</p>
</div></div>
<p id="mrs_01_24036__p27501885316">HoodieDeltaStreamer allows you to capture data from multiple types of source tables and write the data to Hudi tables. However, you can only write data in one source table to one destination table. By contrast, HoodieMultiTableDeltaStreamer supports data write from multiple source tables to one or multiple destination tables.</p>
<ul id="mrs_01_24036__ul141221423204618"><li id="mrs_01_24036__li14674124616464"><strong id="mrs_01_24036__b127716088791549">The following example describes how to write data in two Kafka source tables to two Hudi tables.</strong><div class="note" id="mrs_01_24036__note19332162316915"><img src="public_sys-resources/note_3.0-en-us.png"><span class="notetitle"> </span><div class="notebody"><p id="mrs_01_24036__p6146124814589">Set the following parameters:</p>
<pre class="screen" id="mrs_01_24036__screen1753615503476"><span id="mrs_01_24036__ph186308111232">// Specify the target table.</span>
|
|
<span id="mrs_01_24036__ph3630191119310">hoodie.deltastreamer.ingestion.tablesToBeIngested=Directory name.target table</span>
|
|
<span id="mrs_01_24036__ph3631131119319">//Specify all source tables to specific destination tables.</span>
|
|
<span id="mrs_01_24036__ph763113111331">hoodie.deltastreamer.source.sourcesBoundTo.Destination table=<em id="mrs_01_24036__i091335844712">Directory name.Source table 1,Directory name.Source table 2</em></span>
|
|
<span id="mrs_01_24036__ph1863110111312">// Specify the configuration file path of each source table.</span>
|
|
<span id="mrs_01_24036__ph1863113112316">Hoodie.deltastreamer.Source.directory name.Source table 1.configFile=Path 1</span>
|
|
<span id="mrs_01_24036__ph563111110316">Hoodie.deltastreamer.source.Directory name.Source table 2.configFile=Path 2</span>
|
|
<span id="mrs_01_24036__ph166311911535">// Specify the check point of each source table. The format of the recovery point varies according to the source table type. For example, the recovery point format of Kafka source is "Topic name,Partition name:offset".</span>
|
|
<span id="mrs_01_24036__ph20631111236">hoodie.deltastreamer.current.source.checkpoint=Topic name,Partition name:offset</span>
|
|
<span id="mrs_01_24036__ph1563112117314">// Specify the associated table (Hudi table) of each source table. If there are multiple associated tables, separate them with commas (,).</span>
|
|
<span id="mrs_01_24036__ph1763215111534">hoodie.deltastreamer.source.associated.tables=hdfs://hacluster/....., hdfs://hacluster/.....</span>
|
|
<span id="mrs_01_24036__ph176321311939">// Specify the transform operation before the data in each source table is written to Hudi. Note that the columns to be written must be listed. Do not use select *.</span>
|
|
<span id="mrs_01_24036__ph17632181112319">// <SRC> indicates the current source table and cannot be changed.</span>
|
|
<span id="mrs_01_24036__ph5632711232">hoodie.deltastreamer.transformer.sql=select field1,field2,field3,... from <SRC></span></pre>
</div></div>
<p id="mrs_01_24036__p1359183518172">Spark submission command:</p>
<pre class="screen" id="mrs_01_24036__screen2032118020588"><span id="mrs_01_24036__ph106353118317">spark-submit \</span>
<span id="mrs_01_24036__ph5635191112311">--master yarn \</span>
<span id="mrs_01_24036__ph963591120312">--driver-memory 1g \</span>
<span id="mrs_01_24036__ph10635611433">--executor-memory 1g \</span>
<span id="mrs_01_24036__ph8635201111311">--executor-cores 1 \</span>
<span id="mrs_01_24036__ph166351115315">--num-executors 5 \</span>
<span id="mrs_01_24036__ph36351411231">--conf spark.driver.extraClassPath=<span id="mrs_01_24036__ph1481911208417">/opt/client</span>/Hudi/hudi/conf:<span id="mrs_01_24036__ph727416261842">/opt/client</span>/Hudi/hudi/lib/*:<span id="mrs_01_24036__ph123521341744">/opt/client</span>/Spark2x/spark/jars/* \</span>
<span id="mrs_01_24036__ph263661112311">--class org.apache.hudi.utilities.deltastreamer.HoodieMultiTableDeltaStreamer <span id="mrs_01_24036__ph19471841046">/opt/client</span>/Hudi/hudi/lib/hudi-utilities_2.12-*.jar \</span>
<span id="mrs_01_24036__ph156361911036">--props file:///opt/hudi/testconf/sourceCommon.properties \</span>
<span id="mrs_01_24036__ph196361111933">--config-folder file:///opt/hudi/testconf/ \</span>
<span id="mrs_01_24036__ph1263631119313">--source-class org.apache.hudi.utilities.sources.JsonKafkaSource \</span>
<span id="mrs_01_24036__ph156361411936">--schemaprovider-class org.apache.hudi.examples.common.HoodieMultiTableDeltaStreamerSchemaProvider \</span>
<span id="mrs_01_24036__ph663612111139">--transformer-class org.apache.hudi.utilities.transform.SqlQueryBasedTransformer \</span>
<span id="mrs_01_24036__ph1863717116312">--source-ordering-field col6 \</span>
<span id="mrs_01_24036__ph363720115314">--base-path-prefix hdfs://hacluster/tmp/ \</span>
<span id="mrs_01_24036__ph163715111315">--table-type COPY_ON_WRITE \</span>
<span id="mrs_01_24036__ph1363718111739">--target-table KafkaToHudi \</span>
<span id="mrs_01_24036__ph86371511635">--enable-hive-sync \</span>
<span id="mrs_01_24036__ph66371411133">--allow-fetch-from-multiple-sources \</span>
<span id="mrs_01_24036__ph163713113311">--allow-continuous-when-multiple-sources</span></pre>
<div class="note" id="mrs_01_24036__note124010332498"><img src="public_sys-resources/note_3.0-en-us.png"><span class="notetitle"> </span><div class="notebody"><ol id="mrs_01_24036__ol734016560266"><li id="mrs_01_24036__li2340125615264">When the <strong id="mrs_01_24036__b1158443172619">source</strong> type is <strong id="mrs_01_24036__b1299173672619">kafka source</strong>, the schema provider class specified by <strong id="mrs_01_24036__b6869653112620">--schemaprovider-class</strong> needs to be developed by users.</li><li id="mrs_01_24036__li17340145611264"><strong id="mrs_01_24036__b376814109270">--allow-fetch-from-multiple-sources</strong> indicates that multi-source table writing is enabled.</li><li id="mrs_01_24036__li1934065642612"><strong id="mrs_01_24036__b5414021182712">--allow-continuous-when-multiple-sources</strong> indicates that multi-source table continuous write is enabled. If this parameter is not set, the task ends after all source tables are written once.</li></ol>
</div></div>
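<p>A schema provider for a Kafka source extends Hudi's <strong>org.apache.hudi.utilities.schema.SchemaProvider</strong> class. The following is a minimal sketch; the Avro schema shown (primary_key plus a few columns) is a placeholder, not the exact schema of the tables in this example, and the <strong>TypedProperties</strong> package may differ across Hudi versions:</p>
<pre class="screen">package org.apache.hudi.examples.common; // hypothetical package, as used above

import org.apache.avro.Schema;
import org.apache.hudi.common.config.TypedProperties; // org.apache.hudi.common.util in older versions
import org.apache.hudi.utilities.schema.SchemaProvider;
import org.apache.spark.api.java.JavaSparkContext;

public class HoodieMultiTableDeltaStreamerSchemaProvider extends SchemaProvider {

  // Placeholder schema; in practice this describes the records in the Kafka topic.
  private static final Schema SCHEMA = new Schema.Parser().parse(
      "{\"type\":\"record\",\"name\":\"example\",\"fields\":["
      + "{\"name\":\"primary_key\",\"type\":\"string\"},"
      + "{\"name\":\"col0\",\"type\":\"string\"},"
      + "{\"name\":\"col6\",\"type\":\"long\"}]}");

  public HoodieMultiTableDeltaStreamerSchemaProvider(TypedProperties props, JavaSparkContext jssc) {
    super(props, jssc);
  }

  @Override
  public Schema getSourceSchema() {
    return SCHEMA;
  }

  @Override
  public Schema getTargetSchema() {
    return SCHEMA;
  }
}</pre>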
<p id="mrs_01_24036__p12859153521719">sourceCommon.properties:</p>
<pre class="screen" id="mrs_01_24036__screen8523111952310"><span id="mrs_01_24036__ph96446114313">hoodie.deltastreamer.ingestion.tablesToBeIngested=testdb.KafkaToHudi</span>
<span id="mrs_01_24036__ph116441115318">hoodie.deltastreamer.source.sourcesBoundTo.KafkaToHudi=source1,source2</span>
<span id="mrs_01_24036__ph13645811433">hoodie.deltastreamer.source.default.source1.configFile=file:///opt/hudi/testconf/source1.properties</span>
<span id="mrs_01_24036__ph8645161112313">hoodie.deltastreamer.source.default.source2.configFile=file:///opt/hudi/testconf/source2.properties</span>
<span id="mrs_01_24036__ph96451311037"></span>
<span id="mrs_01_24036__ph36451311134">hoodie.datasource.write.keygenerator.class=org.apache.hudi.keygen.SimpleKeyGenerator</span>
<span id="mrs_01_24036__ph156452113310">hoodie.datasource.write.partitionpath.field=col0</span>
<span id="mrs_01_24036__ph9645111139">hoodie.datasource.write.recordkey.field=primary_key</span>
<span id="mrs_01_24036__ph19645211539">hoodie.datasource.write.precombine.field=col6</span>
<span id="mrs_01_24036__ph10645151117317"></span>
<span id="mrs_01_24036__ph1664510111733">hoodie.datasource.hive_sync.table=kafkatohudisync</span>
<span id="mrs_01_24036__ph76456119318">hoodie.datasource.hive_sync.partition_fields=col0</span>
<span id="mrs_01_24036__ph18645111531">hoodie.datasource.hive_sync.partition_extractor_class=org.apache.hudi.hive.MultiPartKeysValueExtractor</span>
<span id="mrs_01_24036__ph156451118314"></span>
<span id="mrs_01_24036__ph5645911536">bootstrap.servers=192.168.34.221:21005,192.168.34.136:21005,192.168.34.175:21005</span>
<span id="mrs_01_24036__ph11645141117311">auto.offset.reset=latest</span>
<span id="mrs_01_24036__ph176452111134">group.id=hoodie-test</span></pre>
<p id="mrs_01_24036__p457618375238">source1.properties:</p>
<pre class="screen" id="mrs_01_24036__screen132181403242"><span id="mrs_01_24036__ph1964819111835">hoodie.deltastreamer.current.source.name=source1 // Specify the name of a Kafka source table.</span>
<span id="mrs_01_24036__ph2064821110310">hoodie.deltastreamer.source.kafka.topic=s1</span>
<span id="mrs_01_24036__ph156481611338">hoodie.deltastreamer.current.source.checkpoint=s1,0:0,1:0 // Checkpoint of the source table when the task is started. The deltastreamer tasks resume from offset 0 of partition 0 and offset 0 of partition 1.</span>
<span id="mrs_01_24036__ph66486111838">// Specify the Hudi table to be combined with the source1 table. If the Hudi table has been synchronized to Hive, skip this step and use the table name in the SQL statement.</span>
<span id="mrs_01_24036__ph16486111932">hoodie.deltastreamer.source.associated.tables=hdfs://hacluster/tmp/huditest/tb_test_cow_par</span>
<span id="mrs_01_24036__ph96488111835">// <SRC> indicates the current source table, that is, source1. The value is fixed.</span>
<span id="mrs_01_24036__ph146486111836">hoodie.deltastreamer.transformer.sql=select A.primary_key, A.col0, B.col1, B.col2, A.col3, A.col4, B.col5, B.col6, B.col7 from <SRC> as A join tb_test_cow_par as B on A.primary_key = B.primary_key</span></pre>
<p id="mrs_01_24036__p183223714231">source2.properties</p>
<pre class="screen" id="mrs_01_24036__screen1244817157247"><span id="mrs_01_24036__ph864917116313">hoodie.deltastreamer.current.source.name=source2</span>
<span id="mrs_01_24036__ph10649011131">hoodie.deltastreamer.source.kafka.topic=s2</span>
<span id="mrs_01_24036__ph664914111636">hoodie.deltastreamer.current.source.checkpoint=s2,0:0,1:0</span>
<span id="mrs_01_24036__ph5649311231">hoodie.deltastreamer.source.associated.tables=hdfs://hacluster/tmp/huditest/tb_test_cow_par</span>
<span id="mrs_01_24036__ph16505115311">hoodie.deltastreamer.transformer.sql=select A.primary_key, A.col0, B.col1, B.col2, A.col3, A.col4, B.col5, B.col6, B.col7 from <SRC> as A join tb_test_cow_par as B on A.primary_key = B.primary_key</span></pre>
</li><li id="mrs_01_24036__li1292914194712"><strong id="mrs_01_24036__b161964980091549">The following example describes how to write data in two Hudi tables to one Hudi table.</strong><p id="mrs_01_24036__p4626451115219">Spark submission command:</p>
<pre class="screen" id="mrs_01_24036__screen151541718121918"><span id="mrs_01_24036__ph196527111832">spark-submit \</span>
<span id="mrs_01_24036__ph36521811932">--master yarn \</span>
<span id="mrs_01_24036__ph1653201115316">--driver-memory 1g \</span>
<span id="mrs_01_24036__ph12653411731">--executor-memory 1g \</span>
<span id="mrs_01_24036__ph765318111938">--executor-cores 1 \</span>
<span id="mrs_01_24036__ph20653181117314">--num-executors 2 \</span>
<span id="mrs_01_24036__ph116531611738">--conf spark.driver.extraClassPath=<span id="mrs_01_24036__ph638444917415">/opt/client</span>/Hudi/hudi/conf:<span id="mrs_01_24036__ph10358185310412">/opt/client</span>/Hudi/hudi/lib/*:<span id="mrs_01_24036__ph28549561048">/opt/client</span>/Spark2x/spark/jars/* \</span>
<span id="mrs_01_24036__ph86536110310">--class org.apache.hudi.utilities.deltastreamer.HoodieMultiTableDeltaStreamer <span id="mrs_01_24036__ph11133151651">/opt/client</span>/Hudi/hudi/lib/hudi-utilities_2.12-*.jar \</span>
<span id="mrs_01_24036__ph06549112030">--props file:///opt/testconf/sourceCommon.properties \</span>
<span id="mrs_01_24036__ph196541411930">--config-folder file:///opt/testconf/ \</span>
<span id="mrs_01_24036__ph186541211136">--source-class org.apache.hudi.utilities.sources.HoodieIncrSource \ // Specify that the source table is a Hudi table, which can only be COW.</span>
<span id="mrs_01_24036__ph136542111739">--payload-class org.apache.hudi.common.model.OverwriteNonDefaultsWithLatestAvroPayload \ // Specify a payload, which determines how the original value is changed to a new value.</span>
<span id="mrs_01_24036__ph186545111315">--transformer-class org.apache.hudi.utilities.transform.SqlQueryBasedTransformer \ // Specify a transformer class. If the schema of the source table is different from that of the target table, the source table data can be written to the target table only after being transformed.</span>
<span id="mrs_01_24036__ph965431118319">--source-ordering-field col6 \</span>
<span id="mrs_01_24036__ph9654611538">--base-path-prefix hdfs://hacluster/tmp/ \ // Path for saving the destination tables</span>
<span id="mrs_01_24036__ph66541711530">--table-type MERGE_ON_READ \ // Type of the destination table, which can be COW or MOR.</span>
<span id="mrs_01_24036__ph17654211939">--target-table tb_test_mor_par_300 \ // Specify the name of the target table. When you write data in multiple source tables to a target table, the name of the target table must be specified.</span>
<span id="mrs_01_24036__ph4654211932">--checkpoint 000 \ // Specify a checkpoint (commit timestamp), which indicates that Delta Streamer is restored from this checkpoint. 000 indicates that Delta Streamer is restored from the beginning.</span>
<span id="mrs_01_24036__ph17654911734">--enable-hive-sync \</span>
<span id="mrs_01_24036__ph365431114313">--allow-fetch-from-multiple-sources \</span>
<span id="mrs_01_24036__ph1165412111435">--allow-continuous-when-multiple-sources \</span>
<span id="mrs_01_24036__ph13655511835">--op UPSERT // Specify the write type.</span></pre>
<div class="note" id="mrs_01_24036__note193977511277"><img src="public_sys-resources/note_3.0-en-us.png"><span class="notetitle"> </span><div class="notebody"><ul id="mrs_01_24036__ul995316338287"><li id="mrs_01_24036__li189533338281">If the <strong id="mrs_01_24036__b1664912511284">source</strong> type is <strong id="mrs_01_24036__b6656727102813">HoodieIncrSourc</strong>, <strong id="mrs_01_24036__b397533312820">--schemaprovider-class</strong> does not need to be specified.</li><li id="mrs_01_24036__li17953103319285">If <strong id="mrs_01_24036__b1354844113294">transformer-class</strong> is set to <strong id="mrs_01_24036__b1684357193017">SqlQueryBasedTransformer</strong>, you can use SQL queries to convert the data structure of the source table to that of the destination table.</li></ul>
</div></div>
<p id="mrs_01_24036__p6340133213533">file:///opt/testconf/sourceCommon.properties:</p>
<pre class="screen" id="mrs_01_24036__screen134731623125"><span id="mrs_01_24036__ph7662131119310"># Common properties of source tables</span>
<span id="mrs_01_24036__ph18662141118318">hoodie.deltastreamer.ingestion.tablesToBeIngested=testdb.tb_test_mor_par_300 // Specify a target table (common property) to which multiple source tables are written. </span>
<span id="mrs_01_24036__ph666217111136">hoodie.deltastreamer.source.sourcesBoundTo.tb_test_mor_par_300=testdb.tb_test_mor_par_100,testdb.tb_test_mor_par_200 //Specify multiple source tables.</span>
<span id="mrs_01_24036__ph116621111331">hoodie.deltastreamer.source.testdb.tb_test_mor_par_100.configFile=file:///opt/testconf/tb_test_mor_par_100.properties // Property file path of the source table tb_test_mor_par_100</span>
<span id="mrs_01_24036__ph766221117314">hoodie.deltastreamer.source.testdb.tb_test_mor_par_200.configFile=file:///opt/testconf/tb_test_mor_par_200.properties //Property file path of the source table tb_test_mor_par_200</span>
<span id="mrs_01_24036__ph2662181111313"></span>
<span id="mrs_01_24036__ph1866201119318"># Hudi write configurations shared by all source tables. The independent configurations of a source table need to be written to its property file.</span>
<span id="mrs_01_24036__ph1066218111030">hoodie.datasource.write.keygenerator.class=org.apache.hudi.keygen.SimpleKeyGenerator</span>
<span id="mrs_01_24036__ph1666215111335">hoodie.datasource.write.partitionpath.field=col0</span>
<span id="mrs_01_24036__ph12662511339">hoodie.datasource.write.recordkey.field=primary_key</span>
<span id="mrs_01_24036__ph15663191110320">hoodie.datasource.write.precombine.field=col6</span></pre>
<p id="mrs_01_24036__p1896910324534">file:///opt/testconf/tb_test_mor_par_100.properties</p>
<pre class="screen" id="mrs_01_24036__screen1019211174213"><span id="mrs_01_24036__ph14664121112312"># Configurations of the source table tb_test_mor_par_100</span>
<span id="mrs_01_24036__ph56641411438">hoodie.deltastreamer.source.hoodieincr.path=hdfs://hacluster/tmp/testdb/tb_test_mor_par_100 // Path of the source table</span>
<span id="mrs_01_24036__ph1664101110310">hoodie.deltastreamer.source.hoodieincr.partition.fields=col0 // Partitioning key of the source table</span>
<span id="mrs_01_24036__ph166517111637">hoodie.deltastreamer.source.hoodieincr.read_latest_on_missing_ckpt=false</span>
<span id="mrs_01_24036__ph866591116317">hoodie.deltastreamer.source.associated.tables=hdfs://hacluster/tmp/testdb/tb_test_mor_par_400 //Specify the table to be associated with the source table.</span>
<span id="mrs_01_24036__ph7665131110313">hoodie.deltastreamer.transformer.sql=select A.primary_key, A.col0, B.col1, B.col2, A.col3, A.col4, B.col5, A.col6, B.col7 from <SRC> as A join tb_test_mor_par_400 as B on A.primary_key = B.primary_key //This configuration takes effect only when <strong id="mrs_01_24036__b1958114910249">transformer-class</strong> is set to <strong id="mrs_01_24036__b7474513172416">SqlQueryBasedTransformer</strong>.</span></pre>
<div class="p" id="mrs_01_24036__p0808511621">file:///opt/testconf/tb_test_mor_par_200.properties<pre class="screen" id="mrs_01_24036__screen157321311414"><span id="mrs_01_24036__ph106661011934"># Configurations of the source table tb_test_mor_par_200</span>
<span id="mrs_01_24036__ph126662111537">hoodie.deltastreamer.source.hoodieincr.path=hdfs://hacluster/tmp/testdb/tb_test_mor_par_200</span>
<span id="mrs_01_24036__ph466610119318">hoodie.deltastreamer.source.hoodieincr.partition.fields=col0</span>
<span id="mrs_01_24036__ph766612111538">hoodie.deltastreamer.source.hoodieincr.read_latest_on_missing_ckpt=false</span>
<span id="mrs_01_24036__ph1166610119312">hoodie.deltastreamer.source.associated.tables=hdfs://hacluster/tmp/testdb/tb_test_mor_par_400</span>
<span id="mrs_01_24036__ph266613112310">hoodie.deltastreamer.transformer.sql=select A.primary_key, A.col0, B.col1, B.col2, A.col3, A.col4, B.col5, A.col6, B.col7 from <SRC> as A join tb_test_mor_par_400 as B on A.primary_key = B.primary_key //Convert the data structure of the source table to that of the destination table. If the source table needs to be associated with Hive, you can use the table name in the SQL query for association. If the source table needs to be associated with a Hudi table, you need to specify the path of the Hudi table first and then use the table name in the SQL query for association.</span>
<span id="mrs_01_24036__ph17667121120318"></span></pre>
</div>
</li></ul>
</div>
</div>
<div>
<div class="familylinks">
<div class="parentlink"><strong>Parent topic:</strong> <a href="mrs_01_24034.html">Write</a></div>
</div>
</div>