diff --git a/.streamlit/config.toml b/.streamlit/config.toml new file mode 100644 index 0000000..847402d --- /dev/null +++ b/.streamlit/config.toml @@ -0,0 +1,10 @@ +[server] +# WebSocketの圧縮を無効化(接続の安定性が向上し、不意な切断が減ります) +# 長時間操作しないと、ネットワーク機器やブラウザが「この通信はもう終わったのかな?」と勘違いして、勝手に切断してしまう +# 圧縮をやめることで、通信が単純かつ頻繁になり、「まだ繋がってますよ!」というアピールが強くなるため、勝手に切断されにくくなる +enableWebsocketCompression = false + +[browser] +# 統計データの送信を停止(動作が少し軽くなります) +# 研究用ツールとしては不要な通信であり、これが原因で動作が重くなったり不安定になったりするのを防ぐ +gatherUsageStats = false \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 4a15dc1..5330bd9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ streamlit>=1.28.0 pandas>=2.0.0 Pillow>=10.0.0 +pytest>=7.0.0 openpyxl>=3.1.0 watchdog>=3.0.0 diff --git a/start_mac.command b/start_mac.command new file mode 100755 index 0000000..ed996b2 --- /dev/null +++ b/start_mac.command @@ -0,0 +1,16 @@ +#!/bin/bash + +# chmod +x start_mac.commandの実装が必須 + +# カレントディレクトリをこのファイルの場所に移動 +cd "$(dirname "$0")" + +echo "==========================================" +echo " IVUS Annotation Tool (Mac/Linux)" +echo "==========================================" + +# 仮想環境(venv)がある場合は有効化する(エラーは無視) +source venv/bin/activate 2>/dev/null + +# アプリ起動 +streamlit run app.py \ No newline at end of file diff --git a/start_windows.bat b/start_windows.bat new file mode 100644 index 0000000..d75f9bc --- /dev/null +++ b/start_windows.bat @@ -0,0 +1,16 @@ +@echo off +:: カレントディレクトリをこのファイルの場所に移動 +cd /d %~dp0 + +echo ========================================== +echo IVUS Annotation Tool (Windows) +echo ========================================== + +:: 仮想環境(venv)がある場合は有効化する(なければ無視して進む) +if exist venv\Scripts\activate.bat call venv\Scripts\activate.bat + +:: アプリ起動 +streamlit run app.py + +:: エラーで落ちたときにすぐ閉じないようにする +pause \ No newline at end of file diff --git a/tests/test_app_logic.py b/tests/test_app_logic.py new file mode 100644 index 0000000..9d44b65 --- /dev/null +++ b/tests/test_app_logic.py @@ -0,0 +1,93 @@ 
+# -*- coding: utf-8 -*- +""" +Test suite for app navigation and annotation logic. +Tests app behavior without running Streamlit. +""" + +import pytest +from types import SimpleNamespace + + +# 簡易的なMockSessionState +# SimpleNamespaceを使うと obj.attr = value のように書けるので便利 +class MockState(SimpleNamespace): + pass + + +def test_navigation_next_unannotated(): + """ + 【基本動作】保存後、次の未完了症例に進むか + """ + # 状態の準備 + state = MockState( + current_case_idx=0, + case_ids=[101, 102, 103], + annotated_cases=set() + ) + + # 1. 現在の症例(101)を保存したと仮定 + current_id = state.case_ids[state.current_case_idx] + state.annotated_cases.add(current_id) + + # --- アプリのロジック(簡易再現) --- + next_idx = state.current_case_idx + 1 + found = False + while next_idx < len(state.case_ids): + if state.case_ids[next_idx] not in state.annotated_cases: + state.current_case_idx = next_idx + found = True + break + next_idx += 1 + # ---------------------------------- + + # 検証: idxが1(102)に進んでいること + assert found is True + assert state.current_case_idx == 1 + assert state.case_ids[state.current_case_idx] == 102 + + +def test_navigation_skip_annotated(): + """ + 【スキップ動作】完了済み症例を飛ばして、その次の未完了に進むか + """ + # 状態: 101(今), 102(完了済), 103(未完了) + state = MockState( + current_case_idx=0, + case_ids=[101, 102, 103], + annotated_cases={102} + ) + + # 1. 
現在の症例(101)を保存 + current_id = state.case_ids[state.current_case_idx] + state.annotated_cases.add(current_id) + + # --- アプリのロジック --- + next_idx = state.current_case_idx + 1 + found = False + while next_idx < len(state.case_ids): + if state.case_ids[next_idx] not in state.annotated_cases: + state.current_case_idx = next_idx + found = True + break + next_idx += 1 + # --------------------- + + # 検証: idxが2(103)まで飛んでいること + assert found is True + assert state.current_case_idx == 2 + assert state.case_ids[state.current_case_idx] == 103 + + +def test_completion_logic(): + """ + 【完了判定】全症例終わったら完了フラグが立つか + """ + state = MockState( + case_ids=[101, 102], + annotated_cases={101, 102} + ) + + # 完了判定ロジック + is_complete = len(state.annotated_cases) >= len(state.case_ids) + + assert is_complete is True diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..0f0a500 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,388 @@ +# -*- coding: utf-8 -*- +""" +Test suite for annotation saving utilities. +Tests data persistence, CSV encoding, and append behavior. +""" + +import os +import csv +import tempfile +import shutil +import pytest +import pandas as pd +from pathlib import Path + +# Import functions from utils module +import sys +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from utils.annotation_saver import ( + initialize_csv, + save_annotation, + get_annotated_cases +) + + +@pytest.fixture +def setup_teardown(): + """ + Create a temporary directory for each test and clean up afterward. + + Yields: + str: Path to temporary directory + """ + # Create temporary directory + temp_dir = tempfile.mkdtemp() + + yield temp_dir + + # Cleanup: Remove temporary directory after test + shutil.rmtree(temp_dir) + + +def test_new_file_creation(setup_teardown): + """ + Test 1: CSV file creation with correct headers. 
+ + Verifies that: + - CSV file is created when it doesn't exist + - Header row is correctly written + - First data row is correctly saved + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_test.csv") + + # Verify file doesn't exist yet + assert not os.path.exists(csv_path), "CSV file should not exist before saving" + + # Save first annotation + save_annotation( + csv_path=csv_path, + case_id=134, + prediction="あり", + confidence=75, + reasons=["石灰化プラークが多い", "減衰プラークが多い"], + comment="明確な所見あり", + annotator="田中", + ground_truth=True + ) + + # Verify file was created + assert os.path.exists(csv_path), "CSV file should exist after saving" + + # Read and verify contents + df = pd.read_csv(csv_path, encoding='utf-8-sig') + + # Check header columns + expected_columns = [ + 'timestamp', 'case_id', 'prediction', 'confidence', + 'reasons', 'comment', 'annotator', 'ground_truth' + ] + assert list(df.columns) == expected_columns, "CSV headers don't match expected" + + # Verify data row + assert len(df) == 1, "Should have exactly 1 data row" + assert df.loc[0, 'case_id'] == 134, "Case ID mismatch" + assert df.loc[0, 'prediction'] == "あり", "Prediction mismatch" + assert df.loc[0, 'confidence'] == 75, "Confidence mismatch" + assert df.loc[0, 'reasons'] == "石灰化プラークが多い; 減衰プラークが多い", "Reasons format incorrect" + assert df.loc[0, 'comment'] == "明確な所見あり", "Comment mismatch" + assert df.loc[0, 'annotator'] == "田中", "Annotator mismatch" + assert df.loc[0, 'ground_truth'] == True, "Ground truth mismatch" + assert 'timestamp' in df.columns and pd.notna(df.loc[0, 'timestamp']), "Timestamp missing" + + +def test_append_mode(setup_teardown): + """ + Test 2: Append functionality - ensure data is not overwritten. 
+ + Verifies that: + - Second annotation is appended (not overwriting first) + - Both rows exist in correct order + - No data loss occurs + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_append_test.csv") + + # Save first annotation + save_annotation( + csv_path=csv_path, + case_id=134, + prediction="あり", + confidence=75, + reasons=["石灰化プラークが多い"], + comment="First annotation", + annotator="田中", + ground_truth=True + ) + + # Save second annotation + save_annotation( + csv_path=csv_path, + case_id=135, + prediction="なし", + confidence=50, + reasons=["石灰化プラークが少ない"], + comment="Second annotation", + annotator="田中", + ground_truth=False + ) + + # Read and verify both rows exist + df = pd.read_csv(csv_path, encoding='utf-8-sig') + + assert len(df) == 2, "Should have exactly 2 data rows after append" + + # Verify first row (should not be overwritten) + assert df.loc[0, 'case_id'] == 134, "First row case ID should remain" + assert df.loc[0, 'prediction'] == "あり", "First row prediction should remain" + assert df.loc[0, 'comment'] == "First annotation", "First row comment should remain" + + # Verify second row + assert df.loc[1, 'case_id'] == 135, "Second row case ID incorrect" + assert df.loc[1, 'prediction'] == "なし", "Second row prediction incorrect" + assert df.loc[1, 'comment'] == "Second annotation", "Second row comment incorrect" + assert df.loc[1, 'ground_truth'] == False, "Second row ground truth incorrect" + + +def test_japanese_encoding(setup_teardown): + """ + Test 3: UTF-8-sig encoding with Japanese characters. 
+ + Verifies that: + - Japanese text is saved without corruption + - utf-8-sig encoding prevents BOM issues in Excel + - Data can be read back correctly + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_japanese_test.csv") + + # Japanese test data + japanese_name = "田中" + japanese_reasons = ["石灰化プラークが多い", "減衰プラークが少ない"] + japanese_comment = "非常に明確な石灰化所見があり、合併症リスクが高いと判断しました。" + + # Save annotation with Japanese text + save_annotation( + csv_path=csv_path, + case_id=200, + prediction="あり", + confidence=100, + reasons=japanese_reasons, + comment=japanese_comment, + annotator=japanese_name, + ground_truth=None # Test None ground truth + ) + + # Read back using utf-8-sig encoding (same as save) + df = pd.read_csv(csv_path, encoding='utf-8-sig') + + # Verify Japanese text is intact + assert df.loc[0, 'annotator'] == japanese_name, f"Japanese name corrupted: expected {japanese_name}, got {df.loc[0, 'annotator']}" + assert df.loc[0, 'prediction'] == "あり", "Japanese prediction corrupted" + + # Verify reasons (semicolon-separated Japanese text) + expected_reasons = "; ".join(japanese_reasons) + assert df.loc[0, 'reasons'] == expected_reasons, f"Japanese reasons corrupted: expected {expected_reasons}, got {df.loc[0, 'reasons']}" + + # Verify comment + assert df.loc[0, 'comment'] == japanese_comment, "Japanese comment corrupted" + + # Verify ground_truth is empty or NaN for None + gt_val = df.loc[0, 'ground_truth'] + assert pd.isna(gt_val) or gt_val == "", f"Ground truth should be empty or NaN for None, got {gt_val}" + + # Additional check: verify file can be opened with standard csv reader + with open(csv_path, 'r', encoding='utf-8-sig') as f: + reader = csv.DictReader(f) + rows = list(reader) + assert len(rows) == 1, "Should have 1 row" + assert rows[0]['annotator'] == japanese_name, "CSV reader can't read Japanese correctly" + + +def test_decimal_case_ids(setup_teardown): + """ + Test 4: Handling decimal case IDs (e.g., 134.1, 134.2). 
+ + Verifies that: + - Decimal case IDs are stored correctly + - get_annotated_cases returns correct set of IDs + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_decimal_test.csv") + + # Save annotations with both integer and decimal case IDs + save_annotation( + csv_path=csv_path, + case_id=134, + prediction="あり", + confidence=75, + reasons=["石灰化プラークが多い"], + comment="Integer case ID", + annotator="田中", + ground_truth=True + ) + + save_annotation( + csv_path=csv_path, + case_id=134.1, + prediction="なし", + confidence=50, + reasons=["石灰化プラークが少ない"], + comment="Decimal case ID 1", + annotator="田中", + ground_truth=False + ) + + save_annotation( + csv_path=csv_path, + case_id=134.2, + prediction="あり", + confidence=80, + reasons=["減衰プラークが多い"], + comment="Decimal case ID 2", + annotator="田中", + ground_truth=True + ) + + # Verify all case IDs are correctly stored + df = pd.read_csv(csv_path, encoding='utf-8-sig') + assert len(df) == 3, "Should have 3 rows" + + # Check case IDs + case_ids = df['case_id'].tolist() + assert 134 in case_ids or 134.0 in case_ids, "Integer case ID 134 not found" + assert 134.1 in case_ids, "Decimal case ID 134.1 not found" + assert 134.2 in case_ids, "Decimal case ID 134.2 not found" + + +def test_get_annotated_cases(setup_teardown): + """ + Test 5: get_annotated_cases function. 
+ + Verifies that: + - Function returns correct set of annotated case IDs + - Returns empty set for non-existent file + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_test.csv") + + # Save annotations + save_annotation( + csv_path=csv_path, + case_id=134, + prediction="あり", + confidence=75, + reasons=["石灰化プラークが多い"], + comment="Case 134", + annotator="田中", + ground_truth=True + ) + + save_annotation( + csv_path=csv_path, + case_id=135, + prediction="なし", + confidence=50, + reasons=["石灰化プラークが少ない"], + comment="Case 135", + annotator="田中", + ground_truth=False + ) + + save_annotation( + csv_path=csv_path, + case_id=136, + prediction="あり", + confidence=80, + reasons=["減衰プラークが多い"], + comment="Case 136", + annotator="田中", + ground_truth=True + ) + + # Test: Get all annotated cases + all_cases = get_annotated_cases(csv_path) + assert len(all_cases) == 3, f"Should have 3 unique cases, got {len(all_cases)}" + assert 134 in all_cases, "Case 134 should be in annotated cases" + assert 135 in all_cases, "Case 135 should be in annotated cases" + assert 136 in all_cases, "Case 136 should be in annotated cases" + + # Test: Filter by annotator + tanaka_cases = get_annotated_cases(csv_path, annotator="田中") + assert len(tanaka_cases) == 3, f"田中 should have 3 cases, got {len(tanaka_cases)}" + + # Test: Non-existent file returns empty set + non_existent_path = os.path.join(temp_dir, "non_existent.csv") + empty_cases = get_annotated_cases(non_existent_path) + assert len(empty_cases) == 0, "Non-existent file should return empty set" + assert isinstance(empty_cases, set), "Should return a set type" + + +def test_initialize_csv_idempotent(setup_teardown): + """ + Test 6: initialize_csv is idempotent (safe to call multiple times). 
+ + Verifies that: + - Calling initialize_csv multiple times doesn't corrupt file + - Existing data is preserved + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_idempotent_test.csv") + + # First initialization + initialize_csv(csv_path) + assert os.path.exists(csv_path), "File should be created" + + # Add some data + save_annotation( + csv_path=csv_path, + case_id=100, + prediction="あり", + confidence=75, + reasons=["石灰化プラークが多い"], + comment="Test data", + annotator="田中", + ground_truth=True + ) + + # Call initialize_csv again (should not corrupt file) + initialize_csv(csv_path) + + # Verify data is still intact + df = pd.read_csv(csv_path, encoding='utf-8-sig') + assert len(df) == 1, "Data should be preserved after re-initialization" + assert df.loc[0, 'case_id'] == 100, "Case ID should be preserved" + assert df.loc[0, 'annotator'] == "田中", "Annotator should be preserved" + + +def test_empty_reasons_list(setup_teardown): + """ + Test 7: Handling empty reasons list. + + Verifies that: + - Empty reasons list is handled gracefully + - Results in empty string in CSV + """ + temp_dir = setup_teardown + csv_path = os.path.join(temp_dir, "annotations_empty_reasons.csv") + + # Save annotation with empty reasons + save_annotation( + csv_path=csv_path, + case_id=200, + prediction="なし", + confidence=50, + reasons=[], # Empty list + comment="No specific reasons", + annotator="田中", + ground_truth=False + ) + + # Read and verify + df = pd.read_csv(csv_path, encoding='utf-8-sig') + assert len(df) == 1, "Should have 1 row" + + # Empty reasons should result in empty string + reasons_value = df.loc[0, 'reasons'] + assert reasons_value == "" or pd.isna(reasons_value), f"Empty reasons should be empty string, got: {reasons_value}"