@@ -787,3 +787,248 @@ def test_queue_position_string_from_run_with_only_platform_position() -> None:
787787 num_preceding_items_platform = 15 ,
788788 )
789789 assert queue_position_string_from_run (run ) == "15 items ahead across the entire platform"
790+
791+
792+ # Tests for retrieve_and_print_run_details with summarize option
793+
794+
@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_mode(mock_console: Mock) -> None:
    """Summarize mode prints the run header, external IDs, and item errors,
    while omitting per-artifact details such as download URLs."""
    started = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
    finished = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC)

    from aignx.codegen.models import ItemOutput

    details = RunData(
        run_id="run-summarize-test",
        application_id="he-tme",
        version_number="1.0.0",
        state=RunState.TERMINATED,
        termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED,
        output=RunOutput.FULL,
        statistics=RunItemStatistics(
            item_count=2,
            item_pending_count=0,
            item_processing_count=0,
            item_skipped_count=0,
            item_succeeded_count=1,
            item_user_error_count=1,
            item_system_error_count=0,
        ),
        submitted_at=started,
        submitted_by="user@example.com",
        terminated_at=finished,
        custom_metadata=None,
        error_message=None,
        error_code=None,
    )

    def make_item(item_id, external_id, reason, output, message, code):
        # Local factory: both items share everything except identity and error fields.
        return ItemResult(
            item_id=item_id,
            external_id=external_id,
            state=ItemState.TERMINATED,
            termination_reason=reason,
            output=output,
            error_message=message,
            error_code=code,
            custom_metadata=None,
            custom_metadata_checksum=None,
            terminated_at=finished,
            output_artifacts=[],
        )

    succeeded = make_item(
        "item-001", "slide-success.svs", ItemTerminationReason.SUCCEEDED, ItemOutput.FULL, None, None
    )
    failed = make_item(
        "item-002",
        "slide-error.svs",
        ItemTerminationReason.USER_ERROR,
        ItemOutput.NONE,
        "Invalid file format",
        "INVALID_FORMAT",
    )

    run = MagicMock()
    run.details.return_value = details
    run.results.return_value = [succeeded, failed]

    retrieve_and_print_run_details(run, hide_platform_queue_position=False, summarize=True)

    # Flatten every console.print call into one searchable string.
    printed = " ".join(str(call) for call in mock_console.print.call_args_list)

    # Header and application info are present.
    assert "Run Details for run-summarize-test" in printed
    assert "he-tme" in printed
    # Both items appear by external ID, and the failed one carries its error text.
    assert "slide-success.svs" in printed
    assert "slide-error.svs" in printed
    assert "Invalid file format" in printed
    # Artifact-level detail is suppressed in summary output.
    assert "Download URL" not in printed
    assert "Artifact ID" not in printed
877+
878+
@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_no_items(mock_console: Mock) -> None:
    """Summarize mode on a run without any items prints the no-results message."""
    pending_run = RunData(
        run_id="run-no-items",
        application_id="test-app",
        version_number="0.0.1",
        state=RunState.PENDING,
        termination_reason=None,
        output=RunOutput.NONE,
        statistics=RunItemStatistics(
            item_count=0,
            item_pending_count=0,
            item_processing_count=0,
            item_skipped_count=0,
            item_succeeded_count=0,
            item_user_error_count=0,
            item_system_error_count=0,
        ),
        submitted_at=datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC),
        submitted_by="user@example.com",
        terminated_at=None,
        custom_metadata=None,
        error_message=None,
        error_code=None,
    )

    run = MagicMock()
    run.details.return_value = pending_run
    run.results.return_value = []

    retrieve_and_print_run_details(run, hide_platform_queue_position=False, summarize=True)

    # Flatten every console.print call into one searchable string.
    printed = " ".join(str(call) for call in mock_console.print.call_args_list)
    assert "Run Details for run-no-items" in printed
    assert "No item results available" in printed
918+
919+
@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_summarize_with_run_error(mock_console: Mock) -> None:
    """Summarize mode surfaces run-level error message and error code."""
    errored_run = RunData(
        run_id="run-with-error",
        application_id="test-app",
        version_number="0.0.1",
        state=RunState.TERMINATED,
        termination_reason=RunTerminationReason.CANCELED_BY_SYSTEM,
        output=RunOutput.NONE,
        statistics=RunItemStatistics(
            item_count=1,
            item_pending_count=0,
            item_processing_count=0,
            item_skipped_count=0,
            item_succeeded_count=0,
            item_user_error_count=0,
            item_system_error_count=1,
        ),
        submitted_at=datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC),
        submitted_by="user@example.com",
        terminated_at=datetime(2025, 1, 1, 12, 5, 0, tzinfo=UTC),
        custom_metadata=None,
        error_message="System error occurred",
        error_code="SYS_ERROR",
    )

    run = MagicMock()
    run.details.return_value = errored_run
    run.results.return_value = []

    retrieve_and_print_run_details(run, hide_platform_queue_position=False, summarize=True)

    # Flatten every console.print call into one searchable string.
    printed = " ".join(str(call) for call in mock_console.print.call_args_list)
    # Both the human-readable message and the machine code must be shown.
    assert "System error occurred" in printed
    assert "SYS_ERROR" in printed
960+
961+
@pytest.mark.unit
@patch("aignostics.application._utils.console")
def test_retrieve_and_print_run_details_default_is_detailed(mock_console: Mock) -> None:
    """Test that default mode (summarize=False) shows detailed output with artifacts.

    Without the ``summarize`` flag the header must read "Run Details" — never the
    summary-mode "Run Summary" — and per-artifact fields (download URL, artifact ID)
    must be printed. The original test claimed the "not 'Run Summary'" half in a
    comment but never asserted it; that assertion is added here.
    """
    submitted_at = datetime(2025, 1, 1, 12, 0, 0, tzinfo=UTC)
    terminated_at = datetime(2025, 1, 1, 13, 0, 0, tzinfo=UTC)

    run_data = RunData(
        run_id="run-detailed-test",
        application_id="he-tme",
        version_number="1.0.0",
        state=RunState.TERMINATED,
        termination_reason=RunTerminationReason.ALL_ITEMS_PROCESSED,
        output=RunOutput.FULL,
        statistics=RunItemStatistics(
            item_count=1,
            item_pending_count=0,
            item_processing_count=0,
            item_skipped_count=0,
            item_succeeded_count=1,
            item_user_error_count=0,
            item_system_error_count=0,
        ),
        submitted_at=submitted_at,
        submitted_by="user@example.com",
        terminated_at=terminated_at,
        custom_metadata=None,
        error_message=None,
        error_code=None,
    )

    from aignx.codegen.models import ArtifactOutput, ArtifactState, ArtifactTerminationReason, ItemOutput

    # Single succeeded item carrying one fully-available output artifact.
    item_result = ItemResult(
        item_id="item-123",
        external_id="slide-001.svs",
        state=ItemState.TERMINATED,
        termination_reason=ItemTerminationReason.SUCCEEDED,
        output=ItemOutput.FULL,
        error_message=None,
        error_code=None,
        custom_metadata=None,
        custom_metadata_checksum=None,
        terminated_at=terminated_at,
        output_artifacts=[
            OutputArtifactElement(
                output_artifact_id="artifact-abc",
                name="result.parquet",
                download_url="https://example.com/result.parquet",
                metadata={"media_type": "application/vnd.apache.parquet"},
                state=ArtifactState.TERMINATED,
                termination_reason=ArtifactTerminationReason.SUCCEEDED,
                output=ArtifactOutput.AVAILABLE,
                error_code=None,
                error_message=None,
            )
        ],
    )

    mock_run = MagicMock()
    mock_run.details.return_value = run_data
    mock_run.results.return_value = [item_result]

    # Call without summarize parameter (default is False)
    retrieve_and_print_run_details(mock_run, hide_platform_queue_position=False)

    all_output = " ".join(str(call) for call in mock_console.print.call_args_list)

    # Verify detailed output shows "Run Details" and never the summary header.
    assert "Run Details for run-detailed-test" in all_output
    assert "Run Summary" not in all_output
    # Verify artifact details ARE shown in detailed mode
    assert "Download URL" in all_output
    assert "Artifact ID" in all_output
0 commit comments