Import Libraries¶
In [ ]:
import os
from typing import Text
from absl import logging
from tfx.orchestration import metadata, pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
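The run captured below used TFX 1.11.0 (the version recorded on the MLMD artifacts in the logs). An optional, minimal check that the local environment matches, assuming the standard `tfx` and `apache_beam` packages are installed:
In [ ]:
import tfx
import apache_beam as beam

# The logs below record tfx_version 1.11.0; a mismatch here is the usual
# explanation for differences in component behaviour or log output.
print("TFX version:", tfx.__version__)
print("Apache Beam version:", beam.__version__)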
Pipeline¶
In [ ]:
# Pipeline name
PIPELINE_NAME = "mkavaldo-pipeline"

# Pipeline inputs
DATA_ROOT = "data"
COMPONENTS_MODULE_FILE = "modules/components.py"
TRANSFORM_MODULE_FILE = "modules/transform.py"
TRAINER_MODULE_FILE = "modules/trainer.py"
TUNER_MODULE_FILE = "modules/tuner.py"

# Pipeline outputs
OUTPUT_BASE = "output"
serving_model_dir = os.path.join(OUTPUT_BASE, "serving_model")
pipeline_root = os.path.join(OUTPUT_BASE, PIPELINE_NAME)
metadata_path = os.path.join(pipeline_root, "metadata.sqlite")
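Before constructing the pipeline, it can help to fail fast if any of the declared inputs are missing; a minimal sanity check using only the paths defined above:
In [ ]:
# Optional: verify the data directory and user module files exist before
# the pipeline is built, and make sure the pipeline root is in place.
for path in [DATA_ROOT, COMPONENTS_MODULE_FILE, TRANSFORM_MODULE_FILE,
             TRAINER_MODULE_FILE, TUNER_MODULE_FILE]:
    if not os.path.exists(path):
        raise FileNotFoundError(f"Required pipeline input not found: {path}")
os.makedirs(pipeline_root, exist_ok=True)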
Initialize the local pipeline.
In [ ]:
def init_local_pipeline(
    components, pipeline_root: Text
) -> pipeline.Pipeline:
    """
    Initializes a local pipeline with the specified components and pipeline root directory.
    Configures Beam arguments for multi-processing mode.

    Args:
        components: A list of TFX components to be used in the pipeline.
        pipeline_root: The root directory for pipeline output.

    Returns:
        pipeline.Pipeline: An instance of the configured TFX pipeline.
    """
    logging.info(f"Pipeline root set to: {pipeline_root}")
    beam_args = [
        "--direct_running_mode=multi_processing",
        # 0 auto-detects the number of workers based on the CPUs
        # available at execution time.
        "--direct_num_workers=0",
    ]
    return pipeline.Pipeline(
        pipeline_name=PIPELINE_NAME,
        pipeline_root=pipeline_root,
        components=components,
        enable_cache=True,
        metadata_connection_config=metadata.sqlite_metadata_connection_config(
            metadata_path
        ),
        beam_pipeline_args=beam_args,
    )
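In the Beam arguments above, `--direct_num_workers=0` lets the DirectRunner auto-detect the number of worker processes from the CPUs available at run time. On a memory-constrained machine the count can be pinned instead; a minimal variant (the value 2 is only an illustration, not what was used for the run below):
In [ ]:
# Alternative Beam arguments: pin the DirectRunner to a fixed number of
# worker processes instead of auto-detecting (2 is an arbitrary example).
beam_args = [
    "--direct_running_mode=multi_processing",
    "--direct_num_workers=2",
]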
Run the pipeline with Apache Beam using the configuration above.
In [ ]:
from modules.components import init_components

logging.set_verbosity(logging.INFO)

config = {
    "DATA_ROOT": DATA_ROOT,
    "training_module": TRAINER_MODULE_FILE,
    "transform_module": TRANSFORM_MODULE_FILE,
    "tuner_module": TUNER_MODULE_FILE,
    "training_steps": 1000,
    "eval_steps": 250,
    "serving_model_dir": serving_model_dir,
}

components = init_components(config)

# Use a distinct name so the imported `pipeline` module is not shadowed.
local_pipeline = init_local_pipeline(components, pipeline_root)
BeamDagRunner().run(pipeline=local_pipeline)
Trial 5 Complete [00h 00m 13s]
val_accuracy: 1.0

Best val_accuracy So Far: 1.0
Total elapsed time: 00h 01m 09s

Results summary
Results in output\mkavaldo-pipeline\Tuner\.system\executor_execution\7\.temp\7\kt_random_search
Showing 10 best trials
Objective(name="val_accuracy", direction="max")

Trial 1 summary
Hyperparameters:
units_1: 256
units_2: 128
units_3: 32
dropout_rate_1: 0.3
dropout_rate_2: 0.5
learning_rate: 0.0005
Score: 1.0

Trial 3 summary
Hyperparameters:
units_1: 128
units_2: 64
units_3: 128
dropout_rate_1: 0.2
dropout_rate_2: 0.5
learning_rate: 0.0005
Score: 1.0

Trial 4 summary
Hyperparameters:
units_1: 512
units_2: 256
units_3: 128
dropout_rate_1: 0.3
dropout_rate_2: 0.4
learning_rate: 0.0005
Score: 1.0

Trial 2 summary
Hyperparameters:
units_1: 128
units_2: 64
units_3: 32
dropout_rate_1: 0.2
dropout_rate_2: 0.5
learning_rate: 0.0005
Score: 0.9837398529052734

Trial 0 summary
Hyperparameters:
units_1: 512
units_2: 128
units_3: 64
dropout_rate_1: 0.4
dropout_rate_2: 0.4
learning_rate: 5e-05
Score: 0.9593495726585388
INFO:absl:node Tuner is finished. INFO:absl:node Trainer is running. INFO:absl:Running launcher for node_info { type { name: "tfx.components.trainer.component.Trainer" base_type: TRAIN } id: "Trainer" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Trainer" } } } } inputs { inputs { key: "examples" value { channels { producer_node_query { id: "Transform" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Transform" } } } artifact_query { type { name: "Examples" base_type: DATASET } } output_key: "transformed_examples" } min_count: 1 } } inputs { key: "hyperparameters" value { channels { producer_node_query { id: "Tuner" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Tuner" } } } artifact_query { type { name: "HyperParameters" } } output_key: "best_hyperparameters" } } } inputs { key: "schema" value { channels { producer_node_query { id: "SchemaGen" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.SchemaGen" } } } artifact_query { type { name: "Schema" } } output_key: "schema" } } } inputs { key: "transform_graph" value { channels { producer_node_query { id: "Transform" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Transform" } } } artifact_query { type { name: "TransformGraph" } } output_key: "transform_graph" } } } } outputs { outputs { key: "model" value { artifact_spec { type { name: "Model" base_type: MODEL } } } } outputs { key: "model_run" value { artifact_spec { type { name: "ModelRun" } } } } } parameters { parameters { key: "custom_config" value { field_value { string_value: "null" } } } parameters { key: "eval_args" value { field_value { string_value: "{\n \"num_steps\": 100,\n \"splits\": [\n \"eval\"\n ]\n}" } } } parameters { key: "module_path" value { field_value { string_value: "trainer@output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl" } } } parameters { key: "train_args" value { field_value { string_value: "{\n \"num_steps\": 1000,\n \"splits\": [\n \"train\"\n ]\n}" } } } } upstream_nodes: "SchemaGen" upstream_nodes: "Transform" upstream_nodes: "Tuner" downstream_nodes: "Evaluator" downstream_nodes: "Pusher" execution_options { caching_options { enable_cache: true } } 
INFO:absl:MetadataStore with DB connection initialized WARNING:absl:ArtifactQuery.property_predicate is not supported. WARNING:absl:ArtifactQuery.property_predicate is not supported. WARNING:absl:ArtifactQuery.property_predicate is not supported. WARNING:absl:ArtifactQuery.property_predicate is not supported. INFO:absl:[Trainer] Resolved inputs: ({'transform_graph': [Artifact(artifact: id: 11 type_id: 25 uri: "output\\mkavaldo-pipeline\\Transform\\transform_graph\\6" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515062594 last_update_time_since_epoch: 1722515062594 , artifact_type: id: 25 name: "TransformGraph" )], 'examples': [Artifact(artifact: id: 6 type_id: 16 uri: "output\\mkavaldo-pipeline\\Transform\\transformed_examples\\6" properties { key: "split_names" value { string_value: "[\"train\", \"eval\"]" } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515062593 last_update_time_since_epoch: 1722515062593 , artifact_type: id: 16 name: "Examples" properties { key: "span" value: INT } properties { key: "split_names" value: STRING } properties { key: "version" value: INT } base_type: DATASET )], 'hyperparameters': [Artifact(artifact: id: 14 type_id: 28 uri: "output\\mkavaldo-pipeline\\Tuner\\best_hyperparameters\\7" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515428370 last_update_time_since_epoch: 1722515428370 , artifact_type: id: 28 name: "HyperParameters" )], 'schema': [Artifact(artifact: id: 3 type_id: 20 uri: "output\\mkavaldo-pipeline\\SchemaGen\\schema\\4" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515024722 last_update_time_since_epoch: 1722515024722 , artifact_type: id: 20 name: "Schema" )]},) INFO:absl:MetadataStore with DB connection initialized INFO:absl:Going to run a new execution 8 INFO:absl:Going to run a new execution: ExecutionInfo(execution_id=8, input_dict={'transform_graph': [Artifact(artifact: id: 11 type_id: 25 uri: "output\\mkavaldo-pipeline\\Transform\\transform_graph\\6" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515062594 last_update_time_since_epoch: 1722515062594 , artifact_type: id: 25 name: "TransformGraph" )], 'examples': [Artifact(artifact: id: 6 type_id: 16 uri: "output\\mkavaldo-pipeline\\Transform\\transformed_examples\\6" properties { key: "split_names" value { string_value: "[\"train\", \"eval\"]" } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE 
create_time_since_epoch: 1722515062593 last_update_time_since_epoch: 1722515062593 , artifact_type: id: 16 name: "Examples" properties { key: "span" value: INT } properties { key: "split_names" value: STRING } properties { key: "version" value: INT } base_type: DATASET )], 'hyperparameters': [Artifact(artifact: id: 14 type_id: 28 uri: "output\\mkavaldo-pipeline\\Tuner\\best_hyperparameters\\7" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515428370 last_update_time_since_epoch: 1722515428370 , artifact_type: id: 28 name: "HyperParameters" )], 'schema': [Artifact(artifact: id: 3 type_id: 20 uri: "output\\mkavaldo-pipeline\\SchemaGen\\schema\\4" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515024722 last_update_time_since_epoch: 1722515024722 , artifact_type: id: 20 name: "Schema" )]}, output_dict=defaultdict(<class 'list'>, {'model': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" , artifact_type: name: "Model" base_type: MODEL )], 'model_run': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Trainer\\model_run\\8" , artifact_type: name: "ModelRun" )]}), exec_properties={'eval_args': '{\n "num_steps": 250,\n "splits": [\n "eval"\n ]\n}', 'module_path': 'trainer@output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl', 'custom_config': 'null', 'train_args': '{\n "num_steps": 1000,\n "splits": [\n "train"\n ]\n}'}, execution_output_uri='output\\mkavaldo-pipeline\\Trainer\\.system\\executor_execution\\8\\executor_output.pb', stateful_working_dir='output\\mkavaldo-pipeline\\Trainer\\.system\\stateful_working_dir\\20240905-192334.648142', tmp_dir='output\\mkavaldo-pipeline\\Trainer\\.system\\executor_execution\\8\\.temp\\', pipeline_node=node_info { type { name: "tfx.components.trainer.component.Trainer" base_type: TRAIN } id: "Trainer" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Trainer" } } } } inputs { inputs { key: "examples" value { channels { producer_node_query { id: "Transform" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Transform" } } } artifact_query { type { name: "Examples" base_type: DATASET } } output_key: "transformed_examples" } min_count: 1 } } inputs { key: "hyperparameters" value { channels { producer_node_query { id: "Tuner" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: 
"mkavaldo-pipeline.Tuner" } } } artifact_query { type { name: "HyperParameters" } } output_key: "best_hyperparameters" } } } inputs { key: "schema" value { channels { producer_node_query { id: "SchemaGen" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.SchemaGen" } } } artifact_query { type { name: "Schema" } } output_key: "schema" } } } inputs { key: "transform_graph" value { channels { producer_node_query { id: "Transform" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Transform" } } } artifact_query { type { name: "TransformGraph" } } output_key: "transform_graph" } } } } outputs { outputs { key: "model" value { artifact_spec { type { name: "Model" base_type: MODEL } } } } outputs { key: "model_run" value { artifact_spec { type { name: "ModelRun" } } } } } parameters { parameters { key: "custom_config" value { field_value { string_value: "null" } } } parameters { key: "eval_args" value { field_value { string_value: "{\n \"num_steps\": 100,\n \"splits\": [\n \"eval\"\n ]\n}" } } } parameters { key: "module_path" value { field_value { string_value: "trainer@output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl" } } } parameters { key: "train_args" value { field_value { string_value: "{\n \"num_steps\": 1000,\n \"splits\": [\n \"train\"\n ]\n}" } } } } upstream_nodes: "SchemaGen" upstream_nodes: "Transform" upstream_nodes: "Tuner" downstream_nodes: "Evaluator" downstream_nodes: "Pusher" execution_options { caching_options { enable_cache: true } } , pipeline_info=id: "mkavaldo-pipeline" , pipeline_run_id='20240905-192334.648142') WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE INFO:absl:udf_utils.get_fn {'eval_args': '{\n "num_steps": 250,\n "splits": [\n "eval"\n ]\n}', 'module_path': 'trainer@output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl', 'custom_config': 'null', 'train_args': '{\n "num_steps": 1000,\n "splits": [\n "train"\n ]\n}'} 'run_fn' INFO:absl:Installing 'output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl' to a temporary directory. 
INFO:absl:Executing: ['c:\\Users\\mkavaldo\\anaconda3\\envs\\mlops-2\\python.exe', '-m', 'pip', 'install', '--target', 'C:\\Users\\mkavaldo\\AppData\\Local\\Temp\\tmp7smn8nqu', 'output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl'] INFO:absl:Successfully installed 'output\\mkavaldo-pipeline\\_wheels\\tfx_user_code_Trainer-0.0+56572d4e8c14ac05bbb00442804984ef31a5f0f30a9e1f048a084d44e088d7ef-py3-none-any.whl'. INFO:absl:Training model.
Model: "model_1" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== radius_mean_xf (InputLayer [(None, 1)] 0 [] ) texture_mean_xf (InputLaye [(None, 1)] 0 [] r) perimeter_mean_xf (InputLa [(None, 1)] 0 [] yer) area_mean_xf (InputLayer) [(None, 1)] 0 [] smoothness_mean_xf (InputL [(None, 1)] 0 [] ayer) compactness_mean_xf (Input [(None, 1)] 0 [] Layer) concavity_mean_xf (InputLa [(None, 1)] 0 [] yer) concave_points_mean_xf (In [(None, 1)] 0 [] putLayer) symmetry_mean_xf (InputLay [(None, 1)] 0 [] er) fractal_dimension_mean_xf [(None, 1)] 0 [] (InputLayer) radius_se_xf (InputLayer) [(None, 1)] 0 [] texture_se_xf (InputLayer) [(None, 1)] 0 [] perimeter_se_xf (InputLaye [(None, 1)] 0 [] r) area_se_xf (InputLayer) [(None, 1)] 0 [] smoothness_se_xf (InputLay [(None, 1)] 0 [] er) compactness_se_xf (InputLa [(None, 1)] 0 [] yer) concavity_se_xf (InputLaye [(None, 1)] 0 [] r) concave_points_se_xf (Inpu [(None, 1)] 0 [] tLayer) symmetry_se_xf (InputLayer [(None, 1)] 0 [] ) fractal_dimension_se_xf (I [(None, 1)] 0 [] nputLayer) radius_worst_xf (InputLaye [(None, 1)] 0 [] r) texture_worst_xf (InputLay [(None, 1)] 0 [] er) perimeter_worst_xf (InputL [(None, 1)] 0 [] ayer) area_worst_xf (InputLayer) [(None, 1)] 0 [] smoothness_worst_xf (Input [(None, 1)] 0 [] Layer) compactness_worst_xf (Inpu [(None, 1)] 0 [] tLayer) concavity_worst_xf (InputL [(None, 1)] 0 [] ayer) concave_points_worst_xf (I [(None, 1)] 0 [] nputLayer) symmetry_worst_xf (InputLa [(None, 1)] 0 [] yer) fractal_dimension_worst_xf [(None, 1)] 0 [] (InputLayer) concatenate_1 (Concatenate (None, 30) 0 ['radius_mean_xf[0][0]', ) 'texture_mean_xf[0][0]', 'perimeter_mean_xf[0][0]', 'area_mean_xf[0][0]', 'smoothness_mean_xf[0][0]', 'compactness_mean_xf[0][0]', 'concavity_mean_xf[0][0]', 'concave_points_mean_xf[0][0] ', 'symmetry_mean_xf[0][0]', 'fractal_dimension_mean_xf[0] [0]', 'radius_se_xf[0][0]', 'texture_se_xf[0][0]', 'perimeter_se_xf[0][0]', 'area_se_xf[0][0]', 'smoothness_se_xf[0][0]', 'compactness_se_xf[0][0]', 'concavity_se_xf[0][0]', 'concave_points_se_xf[0][0]', 'symmetry_se_xf[0][0]', 'fractal_dimension_se_xf[0][0 ]', 'radius_worst_xf[0][0]', 'texture_worst_xf[0][0]', 'perimeter_worst_xf[0][0]', 'area_worst_xf[0][0]', 'smoothness_worst_xf[0][0]', 'compactness_worst_xf[0][0]', 'concavity_worst_xf[0][0]', 'concave_points_worst_xf[0][0 ]', 'symmetry_worst_xf[0][0]', 'fractal_dimension_worst_xf[0 ][0]'] dense_4 (Dense) (None, 256) 7936 ['concatenate_1[0][0]'] batch_normalization_3 (Bat (None, 256) 1024 ['dense_4[0][0]'] chNormalization) dropout_3 (Dropout) (None, 256) 0 ['batch_normalization_3[0][0]' ] dense_5 (Dense) (None, 128) 32896 ['dropout_3[0][0]'] batch_normalization_4 (Bat (None, 128) 512 ['dense_5[0][0]'] chNormalization) dropout_4 (Dropout) (None, 128) 0 ['batch_normalization_4[0][0]' ] dense_6 (Dense) (None, 64) 8256 ['dropout_4[0][0]'] batch_normalization_5 (Bat (None, 64) 256 ['dense_6[0][0]'] chNormalization) dropout_5 (Dropout) (None, 64) 0 ['batch_normalization_5[0][0]' ] dense_7 (Dense) (None, 32) 2080 ['dropout_5[0][0]'] batch_normalization_6 (Bat (None, 32) 128 ['dense_7[0][0]'] chNormalization) dropout_6 (Dropout) (None, 32) 0 ['batch_normalization_6[0][0]' ] dense_8 (Dense) (None, 1) 33 ['dropout_6[0][0]'] ================================================================================================== Total params: 53121 
(207.50 KB)
Trainable params: 52161 (203.75 KB)
Non-trainable params: 960 (3.75 KB)
__________________________________________________________________________________________________
Epoch 1/20
100/100 [==============================] - 8s 33ms/step - loss: 0.5908 - accuracy: 0.7008 - val_loss: 0.4754 - val_accuracy: 0.8699
Epoch 2/20
100/100 [==============================] - 4s 44ms/step - loss: 0.3496 - accuracy: 0.8444 - val_loss: 0.2898 - val_accuracy: 0.9350
Epoch 3/20
100/100 [==============================] - 3s 34ms/step - loss: 0.2723 - accuracy: 0.8928 - val_loss: 0.1855 - val_accuracy: 0.9675
Epoch 4/20
100/100 [==============================] - 3s 26ms/step - loss: 0.2295 - accuracy: 0.9144 - val_loss: 0.1270 - val_accuracy: 1.0000
Epoch 5/20
100/100 [==============================] - 3s 27ms/step - loss: 0.1925 - accuracy: 0.9341 - val_loss: 0.0977 - val_accuracy: 1.0000
Epoch 6/20
100/100 [==============================] - 2s 24ms/step - loss: 0.1681 - accuracy: 0.9431 - val_loss: 0.0822 - val_accuracy: 1.0000
Epoch 7/20
100/100 [==============================] - 4s 36ms/step - loss: 0.1801 - accuracy: 0.9344 - val_loss: 0.0713 - val_accuracy: 1.0000
Epoch 8/20
100/100 [==============================] - 5s 46ms/step - loss: 0.1575 - accuracy: 0.9441 - val_loss: 0.0643 - val_accuracy: 1.0000
Epoch 9/20
100/100 [==============================] - 3s 29ms/step - loss: 0.1355 - accuracy: 0.9533 - val_loss: 0.0570 - val_accuracy: 1.0000
Epoch 10/20
100/100 [==============================] - 3s 26ms/step - loss: 0.1342 - accuracy: 0.9533 - val_loss: 0.0519 - val_accuracy: 1.0000
Epoch 11/20
100/100 [==============================] - 1s 12ms/step - loss: 0.1146 - accuracy: 0.9604 - val_loss: 0.0483 - val_accuracy: 1.0000
Epoch 12/20
100/100 [==============================] - 1s 12ms/step - loss: 0.1180 - accuracy: 0.9589 - val_loss: 0.0452 - val_accuracy: 1.0000
Epoch 13/20
100/100 [==============================] - 1s 12ms/step - loss: 0.1061 - accuracy: 0.9649 - val_loss: 0.0437 - val_accuracy: 0.9837
Epoch 14/20
100/100 [==============================] - 1s 13ms/step - loss: 0.0973 - accuracy: 0.9670 - val_loss: 0.0429 - val_accuracy: 0.9919
Epoch 15/20
100/100 [==============================] - 2s 18ms/step - loss: 0.1038 - accuracy: 0.9611 - val_loss: 0.0429 - val_accuracy: 0.9837
Epoch 16/20
100/100 [==============================] - 2s 21ms/step - loss: 0.0919 - accuracy: 0.9717 - val_loss: 0.0385 - val_accuracy: 0.9919
Epoch 17/20
100/100 [==============================] - 1s 13ms/step - loss: 0.0901 - accuracy: 0.9680 - val_loss: 0.0387 - val_accuracy: 0.9919
Epoch 18/20
100/100 [==============================] - 1s 12ms/step - loss: 0.0958 - accuracy: 0.9705 - val_loss: 0.0360 - val_accuracy: 0.9919
Epoch 19/20
100/100 [==============================] - 1s 12ms/step - loss: 0.0838 - accuracy: 0.9736 - val_loss: 0.0343 - val_accuracy: 0.9919
Epoch 20/20
100/100 [==============================] - 1s 13ms/step - loss: 0.0759 - accuracy: 0.9752 - val_loss: 0.0342 - val_accuracy: 0.9919
INFO:tensorflow:struct2tensor is not available.
INFO:tensorflow:tensorflow_decision_forests is not available.
INFO:tensorflow:tensorflow_text is not available.
INFO:tensorflow:Assets written to: output\mkavaldo-pipeline\Trainer\model\8\Format-Serving\assets
INFO:absl:Training complete. Model written to output\mkavaldo-pipeline\Trainer\model\8\Format-Serving. ModelRun written to output\mkavaldo-pipeline\Trainer\model_run\8 INFO:absl:Cleaning up stateless execution info. INFO:absl:Execution 8 succeeded. INFO:absl:Cleaning up stateful execution info. INFO:absl:Publishing output artifacts defaultdict(<class 'list'>, {'model': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" , artifact_type: name: "Model" base_type: MODEL )], 'model_run': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Trainer\\model_run\\8" , artifact_type: name: "ModelRun" )]}) for execution 8 INFO:absl:MetadataStore with DB connection initialized INFO:absl:node Trainer is finished. INFO:absl:node Evaluator is running. INFO:absl:Running launcher for node_info { type { name: "tfx.components.evaluator.component.Evaluator" base_type: EVALUATE } id: "Evaluator" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Evaluator" } } } } inputs { inputs { key: "baseline_model" value { channels { producer_node_query { id: "Latest_blessed_model_resolver" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Latest_blessed_model_resolver" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } inputs { key: "examples" value { channels { producer_node_query { id: "CsvExampleGen" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.CsvExampleGen" } } } artifact_query { type { name: "Examples" base_type: DATASET } } output_key: "examples" } min_count: 1 } } inputs { key: "model" value { channels { producer_node_query { id: "Trainer" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Trainer" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } } outputs { outputs { key: "blessing" value { artifact_spec { type { name: "ModelBlessing" } } } } outputs { key: "evaluation" value { artifact_spec { type { name: "ModelEvaluation" } } } } } parameters { parameters { key: "eval_config" value { field_value { string_value: "{\n \"metrics_specs\": [\n {\n \"metrics\": [\n {\n \"class_name\": \"AUC\"\n },\n {\n \"class_name\": \"Precision\"\n },\n {\n \"class_name\": \"Recall\"\n },\n {\n \"class_name\": \"ExampleCount\"\n },\n {\n \"class_name\": \"BinaryAccuracy\",\n \"threshold\": {\n \"change_threshold\": {\n \"absolute\": 0.0001,\n \"direction\": \"HIGHER_IS_BETTER\"\n },\n \"value_threshold\": {\n \"lower_bound\": 0.8\n }\n }\n }\n ]\n }\n ],\n \"model_specs\": [\n {\n \"label_key\": 
\"HiringDecision\"\n }\n ],\n \"slicing_specs\": [\n {}\n ]\n}" } } } parameters { key: "example_splits" value { field_value { string_value: "null" } } } parameters { key: "fairness_indicator_thresholds" value { field_value { string_value: "null" } } } } upstream_nodes: "CsvExampleGen" upstream_nodes: "Latest_blessed_model_resolver" upstream_nodes: "Trainer" downstream_nodes: "Pusher" execution_options { caching_options { enable_cache: true } } INFO:absl:MetadataStore with DB connection initialized WARNING:absl:ArtifactQuery.property_predicate is not supported. WARNING:absl:ArtifactQuery.property_predicate is not supported. INFO:absl:[Evaluator] Resolved inputs: ({'model': [Artifact(artifact: id: 15 type_id: 30 uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515482031 last_update_time_since_epoch: 1722515482031 , artifact_type: id: 30 name: "Model" base_type: MODEL )], 'examples': [Artifact(artifact: id: 1 type_id: 16 uri: "output\\mkavaldo-pipeline\\CsvExampleGen\\examples\\2" properties { key: "split_names" value { string_value: "[\"train\", \"eval\"]" } } custom_properties { key: "file_format" value { string_value: "tfrecords_gzip" } } custom_properties { key: "input_fingerprint" value { string_value: "split:single_split,num_files:1,total_bytes:63964,xor_checksum:1718633630,sum_checksum:1718633630" } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "payload_format" value { string_value: "FORMAT_TF_EXAMPLE" } } custom_properties { key: "span" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515018138 last_update_time_since_epoch: 1722515018138 , artifact_type: id: 16 name: "Examples" properties { key: "span" value: INT } properties { key: "split_names" value: STRING } properties { key: "version" value: INT } base_type: DATASET )], 'baseline_model': []},) INFO:absl:MetadataStore with DB connection initialized INFO:absl:Going to run a new execution 9 INFO:absl:Going to run a new execution: ExecutionInfo(execution_id=9, input_dict={'model': [Artifact(artifact: id: 15 type_id: 30 uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515482031 last_update_time_since_epoch: 1722515482031 , artifact_type: id: 30 name: "Model" base_type: MODEL )], 'examples': [Artifact(artifact: id: 1 type_id: 16 uri: "output\\mkavaldo-pipeline\\CsvExampleGen\\examples\\2" properties { key: "split_names" value { string_value: "[\"train\", \"eval\"]" } } custom_properties { key: "file_format" value { string_value: "tfrecords_gzip" } } custom_properties { key: "input_fingerprint" value { string_value: "split:single_split,num_files:1,total_bytes:63964,xor_checksum:1718633630,sum_checksum:1718633630" } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "payload_format" value { string_value: "FORMAT_TF_EXAMPLE" } } custom_properties { key: "span" value { int_value: 0 } } 
custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515018138 last_update_time_since_epoch: 1722515018138 , artifact_type: id: 16 name: "Examples" properties { key: "span" value: INT } properties { key: "split_names" value: STRING } properties { key: "version" value: INT } base_type: DATASET )], 'baseline_model': []}, output_dict=defaultdict(<class 'list'>, {'blessing': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Evaluator\\blessing\\9" , artifact_type: name: "ModelBlessing" )], 'evaluation': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Evaluator\\evaluation\\9" , artifact_type: name: "ModelEvaluation" )]}), exec_properties={'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'eval_config': '{\n "metrics_specs": [\n {\n "metrics": [\n {\n "class_name": "AUC"\n },\n {\n "class_name": "Precision"\n },\n {\n "class_name": "Recall"\n },\n {\n "class_name": "ExampleCount"\n },\n {\n "class_name": "BinaryAccuracy",\n "threshold": {\n "change_threshold": {\n "absolute": 0.0001,\n "direction": "HIGHER_IS_BETTER"\n },\n "value_threshold": {\n "lower_bound": 0.8\n }\n }\n }\n ]\n }\n ],\n "model_specs": [\n {\n "label_key": "HiringDecision"\n }\n ],\n "slicing_specs": [\n {}\n ]\n}'}, execution_output_uri='output\\mkavaldo-pipeline\\Evaluator\\.system\\executor_execution\\9\\executor_output.pb', stateful_working_dir='output\\mkavaldo-pipeline\\Evaluator\\.system\\stateful_working_dir\\20240905-192334.648142', tmp_dir='output\\mkavaldo-pipeline\\Evaluator\\.system\\executor_execution\\9\\.temp\\', pipeline_node=node_info { type { name: "tfx.components.evaluator.component.Evaluator" base_type: EVALUATE } id: "Evaluator" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Evaluator" } } } } inputs { inputs { key: "baseline_model" value { channels { producer_node_query { id: "Latest_blessed_model_resolver" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Latest_blessed_model_resolver" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } inputs { key: "examples" value { channels { producer_node_query { id: "CsvExampleGen" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.CsvExampleGen" } } } artifact_query { type { name: "Examples" base_type: DATASET } } output_key: "examples" } min_count: 1 } } inputs { key: "model" value { channels { producer_node_query { id: "Trainer" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { 
field_value { string_value: "mkavaldo-pipeline.Trainer" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } } outputs { outputs { key: "blessing" value { artifact_spec { type { name: "ModelBlessing" } } } } outputs { key: "evaluation" value { artifact_spec { type { name: "ModelEvaluation" } } } } } parameters { parameters { key: "eval_config" value { field_value { string_value: "{\n \"metrics_specs\": [\n {\n \"metrics\": [\n {\n \"class_name\": \"AUC\"\n },\n {\n \"class_name\": \"Precision\"\n },\n {\n \"class_name\": \"Recall\"\n },\n {\n \"class_name\": \"ExampleCount\"\n },\n {\n \"class_name\": \"BinaryAccuracy\",\n \"threshold\": {\n \"change_threshold\": {\n \"absolute\": 0.0001,\n \"direction\": \"HIGHER_IS_BETTER\"\n },\n \"value_threshold\": {\n \"lower_bound\": 0.8\n }\n }\n }\n ]\n }\n ],\n \"model_specs\": [\n {\n \"label_key\": \"HiringDecision\"\n }\n ],\n \"slicing_specs\": [\n {}\n ]\n}" } } } parameters { key: "example_splits" value { field_value { string_value: "null" } } } parameters { key: "fairness_indicator_thresholds" value { field_value { string_value: "null" } } } } upstream_nodes: "CsvExampleGen" upstream_nodes: "Latest_blessed_model_resolver" upstream_nodes: "Trainer" downstream_nodes: "Pusher" execution_options { caching_options { enable_cache: true } } , pipeline_info=id: "mkavaldo-pipeline" , pipeline_run_id='20240905-192334.648142') INFO:absl:udf_utils.get_fn {'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'eval_config': '{\n "metrics_specs": [\n {\n "metrics": [\n {\n "class_name": "AUC"\n },\n {\n "class_name": "Precision"\n },\n {\n "class_name": "Recall"\n },\n {\n "class_name": "ExampleCount"\n },\n {\n "class_name": "BinaryAccuracy",\n "threshold": {\n "change_threshold": {\n "absolute": 0.0001,\n "direction": "HIGHER_IS_BETTER"\n },\n "value_threshold": {\n "lower_bound": 0.8\n }\n }\n }\n ]\n }\n ],\n "model_specs": [\n {\n "label_key": "HiringDecision"\n }\n ],\n "slicing_specs": [\n {}\n ]\n}'} 'custom_eval_shared_model' INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config= model_specs { label_key: "HiringDecision" } slicing_specs { } metrics_specs { metrics { class_name: "AUC" } metrics { class_name: "Precision" } metrics { class_name: "Recall" } metrics { class_name: "ExampleCount" } metrics { class_name: "BinaryAccuracy" threshold { value_threshold { lower_bound { value: 0.8 } } } } } INFO:absl:Using output\mkavaldo-pipeline\Trainer\model\8\Format-Serving as model.
WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(<keras.saving.saved_model.load.TensorFlowTransform>TransformFeaturesLayer object at 0x000001C0CF557340> and <keras.engine.input_layer.InputLayer object at 0x000001C0C2F79EB0>).
WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(<keras.saving.saved_model.load.TensorFlowTransform>TransformFeaturesLayer object at 0x000001C0CF557340> and <keras.engine.input_layer.InputLayer object at 0x000001C0C2F79EB0>). INFO:absl:The 'example_splits' parameter is not set, using 'eval' split. INFO:absl:Evaluating model. INFO:absl:udf_utils.get_fn {'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'eval_config': '{\n "metrics_specs": [\n {\n "metrics": [\n {\n "class_name": "AUC"\n },\n {\n "class_name": "Precision"\n },\n {\n "class_name": "Recall"\n },\n {\n "class_name": "ExampleCount"\n },\n {\n "class_name": "BinaryAccuracy",\n "threshold": {\n "change_threshold": {\n "absolute": 0.0001,\n "direction": "HIGHER_IS_BETTER"\n },\n "value_threshold": {\n "lower_bound": 0.8\n }\n }\n }\n ]\n }\n ],\n "model_specs": [\n {\n "label_key": "HiringDecision"\n }\n ],\n "slicing_specs": [\n {}\n ]\n}'} 'custom_extractors' INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config= model_specs { label_key: "HiringDecision" } slicing_specs { } metrics_specs { metrics { class_name: "AUC" } metrics { class_name: "Precision" } metrics { class_name: "Recall" } metrics { class_name: "ExampleCount" } metrics { class_name: "BinaryAccuracy" threshold { value_threshold { lower_bound { value: 0.8 } } } } model_names: "" } INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config= model_specs { label_key: "HiringDecision" } slicing_specs { } metrics_specs { metrics { class_name: "AUC" } metrics { class_name: "Precision" } metrics { class_name: "Recall" } metrics { class_name: "ExampleCount" } metrics { class_name: "BinaryAccuracy" threshold { value_threshold { lower_bound { value: 0.8 } } } } model_names: "" } INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config= model_specs { label_key: "HiringDecision" } slicing_specs { } metrics_specs { metrics { class_name: "AUC" } metrics { class_name: "Precision" } metrics { class_name: "Recall" } metrics { class_name: "ExampleCount" } metrics { class_name: "BinaryAccuracy" threshold { value_threshold { lower_bound { value: 0.8 } } } } model_names: "" }
WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. For example, in the saved checkpoint object, `model.layer.weight` and `model.layer_copy.weight` reference the same variable, while in the current object these are two different variables. The referenced variables are:(<keras.saving.saved_model.load.TensorFlowTransform>TransformFeaturesLayer object at 0x000001C0C66B0490> and <keras.engine.input_layer.InputLayer object at 0x000001C0C2EEB850>). INFO:absl:Evaluation complete. Results written to output\mkavaldo-pipeline\Evaluator\evaluation\9. INFO:absl:Checking validation results.
WARNING:tensorflow:From c:\Users\mkavaldo\anaconda3\envs\mlops-2\lib\site-packages\tensorflow_model_analysis\writers\metrics_plots_and_validations_writer.py:110: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version. Instructions for updating: Use eager execution and: `tf.data.TFRecordDataset(path)`
WARNING:tensorflow:From c:\Users\mkavaldo\anaconda3\envs\mlops-2\lib\site-packages\tensorflow_model_analysis\writers\metrics_plots_and_validations_writer.py:110: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version. Instructions for updating: Use eager execution and: `tf.data.TFRecordDataset(path)` INFO:absl:Blessing result True written to output\mkavaldo-pipeline\Evaluator\blessing\9. INFO:absl:Cleaning up stateless execution info. INFO:absl:Execution 9 succeeded. INFO:absl:Cleaning up stateful execution info. INFO:absl:Publishing output artifacts defaultdict(<class 'list'>, {'blessing': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Evaluator\\blessing\\9" , artifact_type: name: "ModelBlessing" )], 'evaluation': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Evaluator\\evaluation\\9" , artifact_type: name: "ModelEvaluation" )]}) for execution 9 INFO:absl:MetadataStore with DB connection initialized INFO:absl:node Evaluator is finished. INFO:absl:node Pusher is running. INFO:absl:Running launcher for node_info { type { name: "tfx.components.pusher.component.Pusher" base_type: DEPLOY } id: "Pusher" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Pusher" } } } } inputs { inputs { key: "model" value { channels { producer_node_query { id: "Trainer" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Trainer" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } inputs { key: "model_blessing" value { channels { producer_node_query { id: "Evaluator" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Evaluator" } } } artifact_query { type { name: "ModelBlessing" } } output_key: "blessing" } } } } outputs { outputs { key: "pushed_model" value { artifact_spec { type { name: "PushedModel" base_type: MODEL } } } } } parameters { parameters { key: "custom_config" value { field_value { string_value: "null" } } } parameters { key: "push_destination" value { field_value { string_value: "{\n \"filesystem\": {\n \"base_directory\": \"output\\\\serving_model\"\n }\n}" } } } } upstream_nodes: "Evaluator" upstream_nodes: "Trainer" execution_options { caching_options { enable_cache: true } } INFO:absl:MetadataStore with DB connection initialized WARNING:absl:ArtifactQuery.property_predicate is not supported. WARNING:absl:ArtifactQuery.property_predicate is not supported. 
INFO:absl:[Pusher] Resolved inputs: ({'model': [Artifact(artifact: id: 15 type_id: 30 uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515482031 last_update_time_since_epoch: 1722515482031 , artifact_type: id: 30 name: "Model" base_type: MODEL )], 'model_blessing': [Artifact(artifact: id: 17 type_id: 33 uri: "output\\mkavaldo-pipeline\\Evaluator\\blessing\\9" custom_properties { key: "blessed" value { int_value: 1 } } custom_properties { key: "current_model" value { string_value: "output\\mkavaldo-pipeline\\Trainer\\model\\8" } } custom_properties { key: "current_model_id" value { int_value: 15 } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515494914 last_update_time_since_epoch: 1722515494914 , artifact_type: id: 33 name: "ModelBlessing" )]},) INFO:absl:MetadataStore with DB connection initialized INFO:absl:Going to run a new execution 10 INFO:absl:Going to run a new execution: ExecutionInfo(execution_id=10, input_dict={'model': [Artifact(artifact: id: 15 type_id: 30 uri: "output\\mkavaldo-pipeline\\Trainer\\model\\8" custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515482031 last_update_time_since_epoch: 1722515482031 , artifact_type: id: 30 name: "Model" base_type: MODEL )], 'model_blessing': [Artifact(artifact: id: 17 type_id: 33 uri: "output\\mkavaldo-pipeline\\Evaluator\\blessing\\9" custom_properties { key: "blessed" value { int_value: 1 } } custom_properties { key: "current_model" value { string_value: "output\\mkavaldo-pipeline\\Trainer\\model\\8" } } custom_properties { key: "current_model_id" value { int_value: 15 } } custom_properties { key: "is_external" value { int_value: 0 } } custom_properties { key: "state" value { string_value: "published" } } custom_properties { key: "tfx_version" value { string_value: "1.11.0" } } state: LIVE create_time_since_epoch: 1722515494914 last_update_time_since_epoch: 1722515494914 , artifact_type: id: 33 name: "ModelBlessing" )]}, output_dict=defaultdict(<class 'list'>, {'pushed_model': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Pusher\\pushed_model\\10" , artifact_type: name: "PushedModel" base_type: MODEL )]}), exec_properties={'push_destination': '{\n "filesystem": {\n "base_directory": "output\\\\serving_model"\n }\n}', 'custom_config': 'null'}, execution_output_uri='output\\mkavaldo-pipeline\\Pusher\\.system\\executor_execution\\10\\executor_output.pb', stateful_working_dir='output\\mkavaldo-pipeline\\Pusher\\.system\\stateful_working_dir\\20240905-192334.648142', tmp_dir='output\\mkavaldo-pipeline\\Pusher\\.system\\executor_execution\\10\\.temp\\', pipeline_node=node_info { type { name: "tfx.components.pusher.component.Pusher" base_type: DEPLOY } id: "Pusher" } contexts { contexts { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } contexts { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } 
contexts { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Pusher" } } } } inputs { inputs { key: "model" value { channels { producer_node_query { id: "Trainer" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Trainer" } } } artifact_query { type { name: "Model" base_type: MODEL } } output_key: "model" } } } inputs { key: "model_blessing" value { channels { producer_node_query { id: "Evaluator" } context_queries { type { name: "pipeline" } name { field_value { string_value: "mkavaldo-pipeline" } } } context_queries { type { name: "pipeline_run" } name { field_value { string_value: "20240905-192334.648142" } } } context_queries { type { name: "node" } name { field_value { string_value: "mkavaldo-pipeline.Evaluator" } } } artifact_query { type { name: "ModelBlessing" } } output_key: "blessing" } } } } outputs { outputs { key: "pushed_model" value { artifact_spec { type { name: "PushedModel" base_type: MODEL } } } } } parameters { parameters { key: "custom_config" value { field_value { string_value: "null" } } } parameters { key: "push_destination" value { field_value { string_value: "{\n \"filesystem\": {\n \"base_directory\": \"output\\\\serving_model\"\n }\n}" } } } } upstream_nodes: "Evaluator" upstream_nodes: "Trainer" execution_options { caching_options { enable_cache: true } } , pipeline_info=id: "mkavaldo-pipeline" , pipeline_run_id='20240905-192334.648142') INFO:absl:Model version: 1725511645 INFO:absl:Model written to serving path output\serving_model\1725511645. INFO:absl:Model pushed to output\mkavaldo-pipeline\Pusher\pushed_model\10. INFO:absl:Cleaning up stateless execution info. INFO:absl:Execution 10 succeeded. INFO:absl:Cleaning up stateful execution info. INFO:absl:Publishing output artifacts defaultdict(<class 'list'>, {'pushed_model': [Artifact(artifact: uri: "output\\mkavaldo-pipeline\\Pusher\\pushed_model\\10" , artifact_type: name: "PushedModel" base_type: MODEL )]}) for execution 10 INFO:absl:MetadataStore with DB connection initialized INFO:absl:node Pusher is finished.
✅ The pipeline ran successfully.
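After a successful run, the pushed model lands under output\serving_model\&lt;version&gt; and every artifact and execution is recorded in the SQLite MLMD store at `metadata_path`. A minimal sketch of inspecting that store with the `ml_metadata` client that ships as a TFX dependency:
In [ ]:
from ml_metadata.metadata_store import metadata_store

# Reuse the same SQLite connection config the pipeline wrote to.
store = metadata_store.MetadataStore(
    metadata.sqlite_metadata_connection_config(metadata_path)
)

# List the pushed-model artifacts recorded by the runs so far.
for artifact in store.get_artifacts_by_type("PushedModel"):
    print(artifact.id, artifact.uri)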