diff --git a/.github/conda/bld.bat b/.github/conda/bld.bat
deleted file mode 100644
index 89481145..00000000
--- a/.github/conda/bld.bat
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-$PYTHON setup.py install --single-version-externally-managed --record=record.txt
diff --git a/.github/conda/build.sh b/.github/conda/build.sh
deleted file mode 100644
index 89481145..00000000
--- a/.github/conda/build.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-$PYTHON setup.py install --single-version-externally-managed --record=record.txt
diff --git a/.github/conda/conda_build_config.yaml b/.github/conda/conda_build_config.yaml
deleted file mode 100644
index 49777cb0..00000000
--- a/.github/conda/conda_build_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-python:
- - 3.13
- - 3.12
- - 3.11
- - 3.10
-
-
diff --git a/.github/conda/meta.yaml b/.github/conda/meta.yaml
deleted file mode 100644
index b78f32b9..00000000
--- a/.github/conda/meta.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-{% set data = load_setup_py_data() %}
-
-package:
- name: ctlearn
- version: {{ data.get('version') }}
-source:
- path: ../..
-
-build:
- #noarch: generic
- number: 0
-
-requirements:
- build:
- - python #==3.12
- - numpy >=1.20
- - setuptools
- - astropy
- - scipy
- - jupyter
- - ctapipe ==0.20.0
- - pytables >=3.8
- - pandas
- host:
- - python #==3.12
- - numpy >=1.20
- - astropy
- - setuptools
- - scipy
- - jupyter
- - ctapipe ==0.20.0
- - pytables >=3.8
- - pandas
- run:
- - python #==3.12
- - numpy >=1.20
- - jupyter
- - setuptools
- - astropy
- - scipy
- - ctapipe ==0.20.0
- - pytables >=3.8
- - pandas
-
- test:
- imports:
- - ctlearn
-about:
- home: https://github.com/ctlearn-project/ctlearn/
- license: BSD3-Clause
- license_file: LICENSE
- summary: Deep Learning for IACT Event Reconstruction.
-extra:
- recipe-maintainers:
- - TjarkMiener
- - nietootein
diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
index c2c8c558..da00acbf 100644
--- a/.github/workflows/python-package-conda.yml
+++ b/.github/workflows/python-package-conda.yml
@@ -1,4 +1,3 @@
-
name: CI
on:
@@ -15,39 +14,61 @@ jobs:
strategy:
matrix:
os: [ubuntu-22.04]
- pyv: [ '3.10','3.11', '3.12', '3.13']
- max-parallel: 5
+ python-version: ['3.12', '3.13', '3.14']
+ dl1dh-version: ['latest', 'nightly']
+ max-parallel: 6
runs-on: ${{ matrix.os }}
+ continue-on-error: ${{ matrix.dl1dh-version == 'nightly' || matrix.python-version == '3.14' }}
steps:
- - uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.pyv }}
- run: |
- # Install Miniconda
- wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
- bash miniconda.sh -b -p $HOME/miniconda
- echo "$HOME/miniconda/bin" >> $GITHUB_PATH
- source $HOME/miniconda/bin/activate
- # Install Mamba via conda (since we don't have mamba yet)
- $HOME/miniconda/bin/conda config --add channels conda-forge
- $HOME/miniconda/bin/conda install -y mamba=2.0.8
- mamba install -y python=${{ matrix.pyv }}
- - name: Add MKL_THREADING_LAYER variable
- run: echo "MKL_THREADING_LAYER=GNU" >> $GITHUB_ENV
- - name: Install dependencies with Mamba
- run: |
- source $HOME/miniconda/bin/activate
- mamba env update --file environment.yml --name base
- - name: Lint with flake8
- run: |
- source $HOME/miniconda/bin/activate
- mamba install flake8
- flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- - name: Install with pip
- run: |
- pip install -e .
- - name: Test with pytest
- run: |
- source $HOME/miniconda/bin/activate
- mamba install pytest
- pytest
+ - uses: actions/checkout@v4
+
+ - name: Set up Miniconda
+ run: |
+ wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
+ bash miniconda.sh -b -p $HOME/miniconda
+ echo "$HOME/miniconda/bin" >> $GITHUB_PATH
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ conda config --add channels conda-forge
+ conda install -y mamba
+
+ - name: Create Python environment
+ run: |
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ mamba create -y -n ctlearn python==${{ matrix.python-version }} -c conda-forge
+ conda activate ctlearn
+
+ - name: Install dependencies
+ run: |
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ conda activate ctlearn
+ sudo apt-get update
+ sudo apt-get install -y git
+ pip install --upgrade pip
+ pip install pylint pylint-exit anybadge eventio pytest flake8
+ if [ "${{ matrix.dl1dh-version }}" = "nightly" ]; then
+ pip install git+https://github.com/cta-observatory/dl1-data-handler.git
+ else
+ pip install dl1-data-handler
+ fi
+
+ - name: Add MKL_THREADING_LAYER variable
+ run: echo "MKL_THREADING_LAYER=GNU" >> $GITHUB_ENV
+
+ - name: Lint with flake8
+ run: |
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ conda activate ctlearn
+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+
+ - name: Install package with pip
+ run: |
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ conda activate ctlearn
+ pip install -e .
+
+ - name: Run pytest
+ run: |
+ source $HOME/miniconda/etc/profile.d/conda.sh
+ conda activate ctlearn
+ pytest
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 646613b1..984895f4 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -2,13 +2,17 @@ name: Release CD
on:
release:
- types: [published]
+ types: [published]
workflow_dispatch:
+ pull_request:
+ paths:
+ - '.github/workflows/**'
+
jobs:
pypi-publish:
name: Publish release to PyPI
environment:
- name: pypi
+ name: pypi
url: https://pypi.org/project/ctlearn/
permissions:
id-token: write
@@ -20,64 +24,29 @@ jobs:
strategy:
matrix:
os: [ubuntu-22.04]
- pyv: ['3.10']
+ pyv: ['3.12']
max-parallel: 5
runs-on: ${{ matrix.os }}
+
steps:
- - uses: actions/checkout@v4
- - name: Set up Python ${{ matrix.pyv }}
- run: |
- conda install -y python=${{ matrix.pyv }}
-
- - name: Add conda to system path
- run: |
- #$CONDA is an environment variable pointing to the root of the miniconda directory
- echo $CONDA/bin >> $GITHUB_PATH
-
- - name: Install dependencies
- run: |
- conda env update --file environment.yml --name base
-
- - name: Build package
- run: |
- python --version
- pip install -U build
- python -m build
- - name: Publish package distributions to PyPI
- uses: pypa/gh-action-pypi-publish@release/v1
+ - uses: actions/checkout@v4
+ - name: Set up micromamba (Python ${{ matrix.pyv }})
+ uses: mamba-org/setup-micromamba@v1
+ with:
+ environment-name: ctlearn
+ create-args: >-
+ python=${{ matrix.pyv }}
+ pip
+ cache-environment: true
- condapublish:
- strategy:
- matrix:
- os: [ubuntu-22.04]
- pyv: ["3.10"]
- max-parallel: 5
- runs-on: ${{ matrix.os }}
- permissions:
- id-token: write
- contents: write
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Set up Python ${{ matrix.pyv }}
- run: |
- conda install -y python=${{ matrix.pyv }}
-
- - name: Add conda to system path
- run: |
- # $CONDA is an environment variable pointing to the root of the miniconda directory
- echo $CONDA/bin >> $GITHUB_PATH
-
- - name: Install dependencies
- run: |
- conda env update --file environment.yml --name base
- sudo apt-get install python3-numpy
-
- - name: publish-to-conda
- uses: fcakyon/conda-publish-action@v1.3
- with:
- subdir: '.github/conda'
- anacondatoken: ${{ secrets.ANACONDA_TOKEN }}
+ - name: Build package
+ shell: micromamba-shell {0}
+ run: |
+ python --version
+ pip install -U build
+ python -m build
+
+ - name: Publish package distributions to PyPI
+ if: github.event_name == 'release'
+ uses: pypa/gh-action-pypi-publish@release/v1
\ No newline at end of file
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 9d5aecee..71fb5123 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -6,7 +6,7 @@ sphinx:
build:
os: ubuntu-22.04
tools:
- python: "3.10"
+ python: "3.12"
python:
install:
diff --git a/README.rst b/README.rst
index 81556e43..f69dbb25 100644
--- a/README.rst
+++ b/README.rst
@@ -28,22 +28,22 @@ CTLearn is a package under active development to run deep learning models to ana
Installation for users
----------------------
-Download and install `Anaconda `_\ , or, for a minimal installation, `Miniconda `_.
-The following command will set up a conda virtual environment, add the
-necessary package channels, and install CTLearn specified version and its dependencies:
+Installation
+------------
+
+First, create and activate a fresh conda environment:
.. code-block:: bash
- CTLEARN_VER=0.10.3
- wget https://raw.githubusercontent.com/ctlearn-project/ctlearn/v$CTLEARN_VER/environment.yml
- conda env create -n [ENVIRONMENT_NAME] -f environment.yml
- conda activate [ENVIRONMENT_NAME]
- pip install ctlearn==$CTLEARN_VER
- ctlearn -h
+ mamba create -n ctlearn -c conda-forge python==3.12 llvmlite
+ mamba activate ctlearn
+
+The latest version of this package can be installed as a pip package:
+.. code-block:: bash
-This should automatically install all dependencies (NOTE: this may take some time, as by default MKL is included as a dependency of NumPy and it is very large).
+ pip install ctlearn
See the documentation for further information like `installation instructions for developers `_, `package usage `_, and `dependencies `_ among other topics.
diff --git a/ctlearn/conftest.py b/ctlearn/conftest.py
index d912ae18..c8d22ef8 100644
--- a/ctlearn/conftest.py
+++ b/ctlearn/conftest.py
@@ -1,16 +1,23 @@
"""
common pytest fixtures for tests in ctlearn.
-Credits to ctapipe for the original code.
"""
import pytest
+import shutil
from ctapipe.core import run_tool
from ctapipe.utils import get_dataset_path
+from ctlearn.tools import TrainCTLearnModel
@pytest.fixture(scope="session")
-def prod5_gamma_simtel_path():
- return get_dataset_path("gamma_prod5.simtel.zst")
+def gamma_simtel_path():
+ return get_dataset_path("gamma_test_large.simtel.gz")
+
+@pytest.fixture(scope="session")
+def proton_simtel_path():
+ return get_dataset_path(
+ "proton_20deg_0deg_run4___cta-prod5-paranal_desert-2147m-Paranal-dark-100evts.simtel.zst"
+ )
@pytest.fixture(scope="session")
def dl1_tmp_path(tmp_path_factory):
@@ -23,25 +30,45 @@ def r1_tmp_path(tmp_path_factory):
return tmp_path_factory.mktemp("r1_")
@pytest.fixture(scope="session")
-def dl1_gamma_file(dl1_tmp_path, prod5_gamma_simtel_path):
+def dl1_gamma_file(dl1_tmp_path, gamma_simtel_path):
"""
DL1 file containing both images and parameters from a gamma simulation set.
"""
from ctapipe.tools.process import ProcessorTool
+ allowed_tels = {7, 13, 15, 16, 17, 19}
output = dl1_tmp_path / "gamma.dl1.h5"
+ argv = [
+ f"--input={gamma_simtel_path}",
+ f"--output={output}",
+ "--write-images",
+ "--SimTelEventSource.focal_length_choice=EQUIVALENT",
+ f"--SimTelEventSource.allowed_tels={allowed_tels}",
+ ]
+ assert run_tool(ProcessorTool(), argv=argv, cwd=dl1_tmp_path) == 0
+ return output
+
+@pytest.fixture(scope="session")
+def dl1_proton_file(dl1_tmp_path, proton_simtel_path):
+ """
+ DL1 file containing both images and parameters from a proton simulation set.
+ """
+ from ctapipe.tools.process import ProcessorTool
+ allowed_tels = {7, 13, 15, 16, 17, 19}
+ output = dl1_tmp_path / "proton.dl1.h5"
argv = [
- f"--input={prod5_gamma_simtel_path}",
+ f"--input={proton_simtel_path}",
f"--output={output}",
"--write-images",
- "--DataWriter.Contact.name=αℓℓ the äüöß",
+ "--SimTelEventSource.focal_length_choice=EQUIVALENT",
+ f"--SimTelEventSource.allowed_tels={allowed_tels}",
]
assert run_tool(ProcessorTool(), argv=argv, cwd=dl1_tmp_path) == 0
return output
@pytest.fixture(scope="session")
-def r1_gamma_file(r1_tmp_path, prod5_gamma_simtel_path):
+def r1_gamma_file(r1_tmp_path, gamma_simtel_path):
"""
R1 file containing both waveforms and parameters from a gamma simulation set.
"""
@@ -50,10 +77,62 @@ def r1_gamma_file(r1_tmp_path, prod5_gamma_simtel_path):
output = r1_tmp_path / "gamma.r1.h5"
argv = [
- f"--input={prod5_gamma_simtel_path}",
+ f"--input={gamma_simtel_path}",
f"--output={output}",
f"--DataWriter.write_r1_waveforms=True",
- "--DataWriter.Contact.name=αℓℓ the äüöß",
+ "--SimTelEventSource.focal_length_choice=EQUIVALENT",
]
assert run_tool(ProcessorTool(), argv=argv, cwd=r1_tmp_path) == 0
- return output
\ No newline at end of file
+ return output
+
+@pytest.fixture(scope="session")
+def ctlearn_trained_dl1_models(dl1_gamma_file, dl1_proton_file, tmp_path_factory):
+ """
+    Train CTLearn models on the DL1 gamma and proton files for all reconstruction tasks.
+ Each test run gets its own isolated temp directories.
+ """
+ tmp_path = tmp_path_factory.mktemp("ctlearn_models")
+
+ # Temporary directories for signal and background
+ signal_dir = tmp_path / "gamma_dl1"
+ signal_dir.mkdir(parents=True, exist_ok=True)
+
+ background_dir = tmp_path / "proton_dl1"
+ background_dir.mkdir(parents=True, exist_ok=True)
+
+ # Hardcopy DL1 gamma file to the signal directory
+ shutil.copy(dl1_gamma_file, signal_dir)
+ # Hardcopy DL1 proton file to the background directory
+ shutil.copy(dl1_proton_file, background_dir)
+
+ ctlearn_trained_dl1_models = {}
+ for reco_task in ["type", "energy", "cameradirection"]:
+ # Output directory for trained model
+ output_dir = tmp_path / f"ctlearn_{reco_task}"
+
+ # Build command-line arguments
+ argv = [
+ f"--signal={signal_dir}",
+ "--pattern-signal=*.dl1.h5",
+ f"--output={output_dir}",
+ f"--reco={reco_task}",
+ "--TrainCTLearnModel.n_epochs=1",
+ "--TrainCTLearnModel.batch_size=2",
+ "--DLImageReader.focal_length_choice=EQUIVALENT",
+ ]
+
+ # Include background only for classification task
+ if reco_task == "type":
+ argv.extend([
+ f"--background={background_dir}",
+ "--pattern-background=*.dl1.h5",
+ "--DLImageReader.enforce_subarray_equality=False",
+ ])
+
+ # Run training
+ assert run_tool(TrainCTLearnModel(), argv=argv, cwd=tmp_path) == 0
+
+ ctlearn_trained_dl1_models[reco_task] = output_dir / "ctlearn_model.keras"
+ # Check that the trained model exists
+ assert ctlearn_trained_dl1_models[reco_task].exists()
+ return ctlearn_trained_dl1_models
\ No newline at end of file
diff --git a/ctlearn/core/loader.py b/ctlearn/core/loader.py
index 5b723f9c..92fd96c2 100644
--- a/ctlearn/core/loader.py
+++ b/ctlearn/core/loader.py
@@ -155,7 +155,7 @@ def _get_mono_item(self, batch):
"""
# Retrieve the telescope images and store in the features dictionary
labels = {}
- features = {"input": batch["features"].data}
+ features = batch["features"].data
if "type" in self.tasks:
labels["type"] = to_categorical(
batch["true_shower_primary_class"].data,
@@ -186,9 +186,6 @@ def _get_mono_item(self, batch):
),
axis=1,
)
- # Temp fix for supporting keras2 & keras3
- if int(keras.__version__.split(".")[0]) >= 3:
- features = features["input"]
return features, labels
def _get_stereo_item(self, batch):
@@ -303,13 +300,10 @@ def _get_stereo_item(self, batch):
)
# Store the fatures in the features dictionary
if "features" in batch.colnames:
- features = {"input": np.array(features)}
+ features = np.array(features)
# TDOO: Add support for both feature vectors
if "mono_feature_vectors" in batch.colnames:
- features = {"input": np.array(mono_feature_vectors)}
+ features = np.array(mono_feature_vectors)
if "stereo_feature_vectors" in batch.colnames:
- features = {"input": np.array(stereo_feature_vectors)}
- # Temp fix for supporting keras2 & keras3
- if int(keras.__version__.split(".")[0]) >= 3:
- features = features["input"]
+ features = np.array(stereo_feature_vectors)
return features, labels
diff --git a/ctlearn/core/model.py b/ctlearn/core/model.py
index 07c79d46..bfac48c2 100644
--- a/ctlearn/core/model.py
+++ b/ctlearn/core/model.py
@@ -288,7 +288,7 @@ def _build_backbone(self, input_shape):
"""
# Define the input layer from the input shape
- network_input = keras.Input(shape=input_shape, name="input")
+ network_input = keras.Input(shape=input_shape)
# Get model arcihtecture parameters for the backbone
filters_list = [layer["filters"] for layer in self.architecture]
kernel_sizes = [layer["kernel_size"] for layer in self.architecture]
@@ -472,7 +472,7 @@ def _build_backbone(self, input_shape):
Keras input layer object for the backbone model.
"""
# Define the input layer from the input shape
- network_input = keras.Input(shape=input_shape, name="input")
+ network_input = keras.Input(shape=input_shape)
# Apply initial padding if specified
if self.init_padding > 0:
network_input = keras.layers.ZeroPadding2D(
@@ -880,7 +880,7 @@ def _build_backbone(self, input_shape):
"""
# Define the input layer from the input shape
- network_input = keras.Input(shape=input_shape, name="input")
+ network_input = keras.Input(shape=input_shape)
# Set the backbone model to be trainable or not
for layer in self.model.layers:
if layer.name.endswith("_block"):
diff --git a/ctlearn/core/tests/test_loader.py b/ctlearn/core/tests/test_loader.py
index 97f87172..7fe71900 100644
--- a/ctlearn/core/tests/test_loader.py
+++ b/ctlearn/core/tests/test_loader.py
@@ -1,17 +1,17 @@
-import pytest
from traitlets.config.loader import Config
from dl1_data_handler.reader import DLImageReader
from ctlearn.core.loader import DLDataLoader
-def test_data_loader(dl1_tmp_path, dl1_gamma_file):
+def test_data_loader(dl1_gamma_file):
"""check"""
# Create a configuration suitable for the test
config = Config(
{
"DLImageReader": {
"allowed_tels": [4],
+ "focal_length_choice": "EQUIVALENT",
},
}
)
@@ -34,4 +34,4 @@ def test_data_loader(dl1_tmp_path, dl1_gamma_file):
and "skydirection" in labels
)
# Check the shape of the features
- assert features["input"].shape == (1, 110, 110, 2)
+ assert features.shape == (1, 110, 110, 2)
diff --git a/ctlearn/tools/__init__.py b/ctlearn/tools/__init__.py
index 996469a1..d54df32d 100644
--- a/ctlearn/tools/__init__.py
+++ b/ctlearn/tools/__init__.py
@@ -1,2 +1,13 @@
"""ctlearn command line tools.
"""
+
+from .train_model import TrainCTLearnModel
+from .predict_LST1 import LST1PredictionTool
+from .predict_model import MonoPredictCTLearnModel, StereoPredictCTLearnModel
+
+__all__ = [
+ "TrainCTLearnModel",
+ "LST1PredictionTool",
+ "MonoPredictCTLearnModel",
+ "StereoPredictCTLearnModel"
+]
\ No newline at end of file
diff --git a/ctlearn/tools/predict_LST1.py b/ctlearn/tools/predict_LST1.py
index e27e68c6..a4751d9a 100644
--- a/ctlearn/tools/predict_LST1.py
+++ b/ctlearn/tools/predict_LST1.py
@@ -120,7 +120,7 @@ class LST1PredictionTool(Tool):
load_type_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) "
+ "Path to a Keras model file (Keras3) "
"for the classification of the primary particle type."
),
allow_none=True,
@@ -132,7 +132,7 @@ class LST1PredictionTool(Tool):
load_energy_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) "
+ "Path to a Keras model file (Keras3) "
"for the regression of the primary particle energy."
),
allow_none=True,
@@ -144,7 +144,7 @@ class LST1PredictionTool(Tool):
load_cameradirection_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) "
+ "Path to a Keras model file (Keras3) "
"for the regression of the primary particle arrival direction "
"based on the camera coordinate offsets."
),
@@ -514,9 +514,6 @@ def start(self):
image = get_unmapped_image(event, self.channels, self.transforms)
data.append(self.image_mapper.map_image(image))
input_data = {"input": np.array(data)}
- # Temp fix for supporting keras2 & keras3
- if int(keras.__version__.split(".")[0]) >= 3:
- input_data = input_data["input"]
event_id.extend(dl1_table["event_id"].data)
tel_azimuth.extend(dl1_table["tel_az"].data)
diff --git a/ctlearn/tools/predict_model.py b/ctlearn/tools/predict_model.py
index 0eaa8b23..3d55b150 100644
--- a/ctlearn/tools/predict_model.py
+++ b/ctlearn/tools/predict_model.py
@@ -65,12 +65,6 @@
SUBARRAY_EVENT_KEYS = ["obs_id", "event_id"]
TELESCOPE_EVENT_KEYS = ["obs_id", "event_id", "tel_id"]
-__all__ = [
- "PredictCTLearnModel",
- "MonoPredictCTLearnModel",
- "StereoPredictCTLearnModel",
-]
-
class PredictCTLearnModel(Tool):
"""
@@ -107,14 +101,14 @@ class PredictCTLearnModel(Tool):
prefix : str
Name of the reconstruction algorithm used to generate the dl2 data.
load_type_model_from : pathlib.Path
- Path to a Keras model file (Keras3) or directory (Keras2) for the classification of the primary particle type.
+ Path to a Keras model file (Keras3) for the classification of the primary particle type.
load_energy_model_from : pathlib.Path
- Path to a Keras model file (Keras3) or directory (Keras2) for the regression of the primary particle energy.
+ Path to a Keras model file (Keras3) for the regression of the primary particle energy.
load_cameradirection_model_from : pathlib.Path
- Path to a Keras model file (Keras3) or directory (Keras2) for the regression
+ Path to a Keras model file (Keras3) for the regression
of the primary particle arrival direction based on camera coordinate offsets.
load_skydirection_model_from : pathlib.Path
- Path to a Keras model file (Keras3) or directory (Keras2) for the regression
+ Path to a Keras model file (Keras3) for the regression
of the primary particle arrival direction based on spherical coordinate offsets.
output_path : pathlib.Path
Output path to save the dl2 prediction results.
@@ -228,7 +222,7 @@ class PredictCTLearnModel(Tool):
load_type_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) for the classification "
+ "Path to a Keras model file (Keras3) for the classification "
"of the primary particle type."
),
allow_none=True,
@@ -240,7 +234,7 @@ class PredictCTLearnModel(Tool):
load_energy_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) for the regression "
+ "Path to a Keras model file (Keras3) for the regression "
"of the primary particle energy."
),
allow_none=True,
@@ -252,7 +246,7 @@ class PredictCTLearnModel(Tool):
load_cameradirection_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) for the regression "
+ "Path to a Keras model file (Keras3) for the regression "
"of the primary particle arrival direction based on camera coordinate offsets."
),
allow_none=True,
@@ -264,7 +258,7 @@ class PredictCTLearnModel(Tool):
load_skydirection_model_from = Path(
default_value=None,
help=(
- "Path to a Keras model file (Keras3) or directory (Keras2) for the regression "
+ "Path to a Keras model file (Keras3) for the regression "
"of the primary particle arrival direction based on spherical coordinate offsets."
),
allow_none=True,
@@ -435,7 +429,7 @@ def _predict_with_model(self, model_path):
Parameters
----------
model_path : str
- Path to a Keras model file (Keras3) or directory (Keras2).
+ Path to a Keras model file (Keras3).
Returns
-------
diff --git a/ctlearn/tools/tests/test_predict_model.py b/ctlearn/tools/tests/test_predict_model.py
new file mode 100644
index 00000000..55e6d97e
--- /dev/null
+++ b/ctlearn/tools/tests/test_predict_model.py
@@ -0,0 +1,77 @@
+import shutil
+import numpy as np
+
+from ctapipe.core import run_tool
+from ctapipe.io import TableLoader
+from ctlearn.tools import MonoPredictCTLearnModel
+
+def test_predict_model(ctlearn_trained_dl1_models, dl1_gamma_file, tmp_path):
+ """
+    Test CTLearn model prediction for all reconstruction tasks using trained models and the DL1 gamma file.
+ Each test run gets its own isolated temp directories.
+ """
+
+ model_dir = tmp_path / "trained_models"
+ model_dir.mkdir(parents=True, exist_ok=True)
+
+ dl2_dir = tmp_path / "dl2_output"
+ dl2_dir.mkdir(parents=True, exist_ok=True)
+
+ # Hardcopy the trained models to the model directory
+ for reco_task in ["type", "energy", "cameradirection"]:
+ shutil.copy(ctlearn_trained_dl1_models[f"{reco_task}"], model_dir / f"ctlearn_model_{reco_task}.keras")
+ model_file = model_dir / f"ctlearn_model_{reco_task}.keras"
+ assert model_file.exists(), f"Trained model file not found for {reco_task}"
+
+ # Build command-line arguments
+ output_file = dl2_dir / "gamma.dl2.h5"
+ argv = [
+ f"--input_url={dl1_gamma_file}",
+ f"--output={output_file}",
+ "--PredictCTLearnModel.batch_size=4",
+ "--DLImageReader.focal_length_choice=EQUIVALENT",
+ ]
+
+ # Run Prediction for energy and type together
+ assert run_tool(
+ MonoPredictCTLearnModel(),
+ argv = argv + [
+ f"--PredictCTLearnModel.load_type_model_from={model_dir}/ctlearn_model_type.keras",
+ f"--PredictCTLearnModel.load_energy_model_from={model_dir}/ctlearn_model_energy.keras",
+ "--use-HDF5Merger",
+ "--no-dl1-images",
+ "--no-true-images",
+ ],
+ cwd=tmp_path
+ ) == 0
+
+ assert run_tool(
+ MonoPredictCTLearnModel(),
+ argv= argv + [
+ f"--PredictCTLearnModel.load_cameradirection_model_from="
+ f"{model_dir}/ctlearn_model_cameradirection.keras",
+ "--no-use-HDF5Merger",
+ ],
+ cwd=tmp_path,
+ ) == 0
+
+
+ allowed_tels = [7, 13, 15, 16, 17, 19]
+ required_columns = [
+ "telescope_pointing_azimuth",
+ "telescope_pointing_altitude",
+ "CTLearn_alt",
+ "CTLearn_az",
+ "CTLearn_prediction",
+ "CTLearn_energy",
+ ]
+ # Check that the output DL2 file was created
+ assert output_file.exists(), "Output DL2 file not created"
+ # Check that the created DL2 file can be read with the TableLoader
+ with TableLoader(output_file, pointing=True, focal_length_choice="EQUIVALENT") as loader:
+ events = loader.read_telescope_events_by_id(telescopes=allowed_tels)
+ for tel_id in allowed_tels:
+ assert len(events[tel_id]) > 0
+ for col in required_columns:
+ assert col in events[tel_id].colnames, f"{col} missing in DL2 file {output_file.name}"
+ assert events[tel_id][col][0] is not np.nan, f"{col} has NaN values in DL2 file {output_file.name}"
\ No newline at end of file
diff --git a/ctlearn/tools/tests/test_train_model.py b/ctlearn/tools/tests/test_train_model.py
new file mode 100644
index 00000000..ed661409
--- /dev/null
+++ b/ctlearn/tools/tests/test_train_model.py
@@ -0,0 +1,76 @@
+import pandas as pd
+import pytest
+import shutil
+
+from ctapipe.core import run_tool
+from ctlearn.tools import TrainCTLearnModel
+
+@pytest.mark.parametrize("reco_task", ["type", "energy", "cameradirection", "skydirection"])
+def test_train_ctlearn_model(reco_task, dl1_gamma_file, dl1_proton_file, tmp_path):
+ """
+ Test training CTLearn model using the DL1 gamma and proton files for all reconstruction tasks.
+ Each test run gets its own isolated temp directories.
+ """
+ # Temporary directories for signal and background
+ signal_dir = tmp_path / "gamma_dl1"
+ signal_dir.mkdir(parents=True, exist_ok=True)
+
+ background_dir = tmp_path / "proton_dl1"
+ background_dir.mkdir(parents=True, exist_ok=True)
+
+ # Hardcopy DL1 gamma file to the signal directory
+ shutil.copy(dl1_gamma_file, signal_dir)
+ # Hardcopy DL1 proton file to the background directory
+ shutil.copy(dl1_proton_file, background_dir)
+
+ # Output directory for trained model
+ output_dir = tmp_path / f"ctlearn_{reco_task}"
+
+ # Build command-line arguments
+ argv = [
+ f"--signal={signal_dir}",
+ "--pattern-signal=*.dl1.h5",
+ f"--output={output_dir}",
+ f"--reco={reco_task}",
+ "--TrainCTLearnModel.n_epochs=2",
+ "--TrainCTLearnModel.batch_size=4",
+ "--DLImageReader.focal_length_choice=EQUIVALENT",
+ ]
+
+ # Include background only for classification task
+ if reco_task == "type":
+ argv.extend([
+ f"--background={background_dir}",
+ "--pattern-background=*.dl1.h5",
+ "--DLImageReader.enforce_subarray_equality=False",
+ ])
+
+ # Run training
+ assert run_tool(TrainCTLearnModel(), argv=argv, cwd=tmp_path) == 0
+
+ # --- Additional checks ---
+ # Check that the trained model exists
+ model_file = output_dir / "ctlearn_model.keras"
+ assert model_file.exists(), f"Trained model file not found for {reco_task}"
+ # Check training_log.csv exists
+ log_file = output_dir / "training_log.csv"
+ assert log_file.exists(), f"Training log file not found for {reco_task}"
+ # Read CSV and verify number of epochs
+ log_df = pd.read_csv(log_file)
+ num_epochs_logged = log_df.shape[0]
+ assert num_epochs_logged == 2, f"Expected two epochs, found {num_epochs_logged} for {reco_task}"
+ # Check that val_loss column exists
+ assert "val_loss" in log_df.columns, (
+ f"'val_loss' column missing in training_log.csv for {reco_task}"
+ )
+ val_loss_min= 0.0
+ val_loss_max= 1.5 if reco_task == "skydirection" else 1.0
+ # Check val_loss values are between 0.0 and 1.0 (or 1.5 for skydirection)
+ val_loss = log_df["val_loss"].dropna()
+ assert not val_loss.empty, (
+ f"'val_loss' column is empty for {reco_task}"
+ )
+ assert ((val_loss >= val_loss_min) & (val_loss <= val_loss_max)).all(), (
+ f"'val_loss' values out of range [{val_loss_min}, {val_loss_max}] for {reco_task}: "
+ f"{val_loss.tolist()}"
+ )
\ No newline at end of file
diff --git a/ctlearn/tools/train_model.py b/ctlearn/tools/train_model.py
index 2ce4ced5..4c440350 100644
--- a/ctlearn/tools/train_model.py
+++ b/ctlearn/tools/train_model.py
@@ -335,11 +335,7 @@ def setup(self):
monitor = "val_loss"
monitor_mode = "min"
# Model checkpoint callback
- # Temp fix for supporting keras2 & keras3
- if int(keras.__version__.split(".")[0]) >= 3:
- model_path = f"{self.output_dir}/ctlearn_model.keras"
- else:
- model_path = f"{self.output_dir}/ctlearn_model.cpk"
+ model_path = f"{self.output_dir}/ctlearn_model.keras"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=model_path,
monitor=monitor,
diff --git a/environment.yml b/environment.yml
deleted file mode 100644
index 20a49207..00000000
--- a/environment.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-# conda env create -f environment.yml
-name: ctlearn
-channels:
- - anaconda
- - conda-forge
-dependencies:
- - python=3.10
- - astropy
- - setuptools
- - numpy
- - pandas
- - pip
- - pytables
- - tables
- - c-blosc2=2.13
- - pyyaml
- - scikit-learn
- - ctapipe
- - pip:
- - numba
- - tensorflow>=2.14,<2.15
- - dl1_data_handler>=0.14.5,<0.15
- - pydot
diff --git a/pyproject.toml b/pyproject.toml
index 434bd27a..c361c71f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,16 +20,15 @@ classifiers = [
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Physics",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.10",
- "Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
]
-requires-python = ">=3.10"
+requires-python = ">=3.12"
dependencies = [
- "dl1_data_handler>=0.14.5, <0.15",
+ "dl1_data_handler>=0.14.5",
"astropy",
"numpy",
"pandas",
@@ -37,10 +36,10 @@ dependencies = [
"pyyaml",
"scikit-learn",
"numba",
- "tensorflow>=2.14,<2.15",
+ "tensorflow>=2.16",
"pydot",
"setuptools",
- "ctapipe>=0.22.0, <0.26",
+ "ctapipe[all]>=0.28",
]
dynamic = ["version"]