diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml
index 80316f78..5e6acf5d 100644
--- a/.github/workflows/build-test.yml
+++ b/.github/workflows/build-test.yml
@@ -16,7 +16,7 @@ jobs:
         # os: [ubuntu-latest, macos-latest]
         # python-version: ['3.8', '3.9', '3.10', '3.11']
         os: [macos-latest ]
-        python-version: [ '3.8', '3.9' ]
+        python-version: [ '3.8' ]
         include:
           - os: windows-latest
             python-version: '3.10'
diff --git a/tests/test_plots.py b/tests/test_plots.py
index b4f8802a..5ab44f1b 100644
--- a/tests/test_plots.py
+++ b/tests/test_plots.py
@@ -76,618 +76,618 @@ def is_github_ci():
 show_plots = False
-class TestPlots(unittest.TestCase):
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs) # Call the parent constructor
-        # Define the save directory
-        self.artifacts = os.path.join(os.path.dirname(__file__), "artifacts")
-        self.save_dir = os.path.join(self.artifacts, "plots")
-        os.makedirs(self.save_dir, exist_ok=True)
-
-    def savefig(self, ax, name):
-        ax.figure.savefig(os.path.join(self.save_dir, name))
-
-
-class TestTimeSeriesPlots(TestPlots):
-
-    def setUp(self):
-        # This method is called before each test.
-        # Load the stochastic event sets and observation here.
-
-        cat_file_m2 = csep.datasets.comcat_example_catalog_fname
-        cat_file_m5 = os.path.join(
-            self.artifacts,
-            "example_csep2_forecasts",
-            "Catalog",
-            "catalog.json",
-        )
-
-        forecast_file = os.path.join(
-            self.artifacts,
-            "example_csep2_forecasts",
-            "Forecasts",
-            "ucerf3-landers_short.csv",
-        )
-
-        self.stochastic_event_sets = csep.load_catalog_forecast(forecast_file)
-        self.observation_m5 = catalogs.CSEPCatalog.load_json(cat_file_m5)
-        self.observation_m2 = csep.load_catalog(cat_file_m2)
-
-    def test_plot_magnitude_vs_time(self):
-        # Basic test
-        ax = plot_magnitude_vs_time(catalog=self.observation_m2, show=show_plots)
-        self.assertEqual(ax.get_title(), "Magnitude vs. 
Time") - self.assertEqual(ax.get_xlabel(), "Datetime") - self.assertEqual(ax.get_ylabel(), "$M$") - - # Test with custom color - ax = plot_magnitude_vs_time(catalog=self.observation_m2, color='red', show=show_plots) - scatter_color = ax.collections[0].get_facecolor()[0] - self.assertTrue(all(scatter_color[:3] == (1.0, 0.0, 0.0))) # Check if color is red - - # Test with custom marker size - ax = plot_magnitude_vs_time(catalog=self.observation_m2, size=25, max_size=600, - show=show_plots) - scatter_sizes = ax.collections[0].get_sizes() - func_sizes = _autosize_scatter(self.observation_m2.data["magnitude"], 25, 600, 4) - numpy.testing.assert_array_almost_equal(scatter_sizes, func_sizes) - - # Test with custom alpha - ax = plot_magnitude_vs_time(catalog=self.observation_m2, alpha=0.5, show=show_plots) - scatter_alpha = ax.collections[0].get_alpha() - self.assertEqual(scatter_alpha, 0.5) - - # Test with custom marker size power - ax = plot_magnitude_vs_time(catalog=self.observation_m2, power=6, show=show_plots) - scatter_sizes = ax.collections[0].get_sizes() - func_sizes = _autosize_scatter(self.observation_m2.data["magnitude"], 4, 300, 6) - numpy.testing.assert_array_almost_equal(scatter_sizes, func_sizes) - # - # # Test with show=True (just to ensure no errors occur) - plot_magnitude_vs_time(catalog=self.observation_m2, show=True) - - def test_plot_cumulative_events_default(self): - # Test with default arguments to ensure basic functionality - ax = plot_cumulative_events_versus_time( - catalog_forecast=self.stochastic_event_sets, - observation=self.observation_m5, - show=show_plots - ) - - self.assertIsNotNone(ax.get_title()) - self.assertIsNotNone(ax.get_xlabel()) - self.assertIsNotNone(ax.get_ylabel()) - - def test_plot_cumulative_events_hours(self): - # Test with time_axis set to 'hours' - ax = plot_cumulative_events_versus_time( - catalog_forecast=self.stochastic_event_sets, - observation=self.observation_m5, - bins=50, - time_axis="hours", - xlabel="Hours since Mainshock", - ylabel="Cumulative Event Count", - title="Cumulative Event Counts by Hour", - legend_loc="upper left", - show=show_plots - ) - - self.assertEqual(ax.get_xlabel(), "Hours since Mainshock") - self.assertEqual(ax.get_ylabel(), "Cumulative Event Count") - self.assertEqual(ax.get_title(), "Cumulative Event Counts by Hour") - - def test_plot_cumulative_events_different_bins(self): - # Test with different number of bins - ax = plot_cumulative_events_versus_time( - catalog_forecast=self.stochastic_event_sets, - observation=self.observation_m5, - bins=200, - show=show_plots, - figsize=(12, 8), - time_axis="days", - xlabel="Days since Mainshock", - ylabel="Cumulative Event Count", - title="Cumulative Event Counts with More Bins", - legend_loc="best" - ) - - self.assertEqual(ax.get_title(), "Cumulative Event Counts with More Bins") - self.assertEqual(ax.get_xlabel(), "Days since Mainshock") - self.assertEqual(ax.get_ylabel(), "Cumulative Event Count") - - def test_plot_cumulative_events_custom_legend(self): - # Test with a custom legend location and size - ax = plot_cumulative_events_versus_time( - catalog_forecast=self.stochastic_event_sets, - observation=self.observation_m5, - bins=75, - show=show_plots, - figsize=(8, 5), - time_axis="days", - xlabel="Days since Mainshock", - ylabel="Cumulative Event Count", - title="Cumulative Event Counts with Custom Legend", - legend_loc="lower right", - legend_fontsize=14 - ) - - self.assertEqual(ax.get_legend()._get_loc(), 4) - 
self.assertEqual(ax.get_legend().get_texts()[0].get_fontsize(), 14) - - def tearDown(self): - plt.close("all") - del self.stochastic_event_sets - del self.observation_m2 - del self.observation_m5 - gc.collect() - - -class TestPlotMagnitudeHistogram(TestPlots): - - def setUp(self): - - def gr_dist(num_events, mag_min=3.0, mag_max=8.0, b_val=1.0): - U = numpy.random.uniform(0, 1, num_events) - magnitudes = mag_min - (1.0 / b_val) * numpy.log10(1 - U) - magnitudes = magnitudes[magnitudes <= mag_max] - return magnitudes - - self.mock_forecast = [MagicMock(), MagicMock(), MagicMock()] - for i in self.mock_forecast: - i.get_magnitudes.return_value = gr_dist(5000) - - self.mock_cat = MagicMock() - self.mock_cat.get_magnitudes.return_value = gr_dist(500, b_val=1.2) - self.mock_cat.get_number_of_events.return_value = 500 - self.mock_cat.region.magnitudes = numpy.arange(3.0, 8.0, 0.1) - self.save_dir = os.path.join(os.path.dirname(__file__), "artifacts", "plots") - - cat_file_m5 = os.path.join( - self.artifacts, - "example_csep2_forecasts", - "Catalog", - "catalog.json", - ) - self.comcat = catalogs.CSEPCatalog.load_json(cat_file_m5) - forecast_file = os.path.join( - self.artifacts, - "example_csep2_forecasts", - "Forecasts", - "ucerf3-landers_short.csv", - ) - - self.stochastic_event_sets = csep.load_catalog_forecast(forecast_file) - - os.makedirs(self.save_dir, exist_ok=True) - - def test_plot_magnitude_histogram_basic(self): - # Test with basic arguments - ax = plot_magnitude_histogram(self.mock_forecast, - self.mock_cat, show=show_plots, - density=True) - - # Verify that magnitudes were retrieved - for catalog in self.mock_forecast: - catalog.get_magnitudes.assert_called_once() - self.mock_cat.get_magnitudes.assert_called_once() - self.mock_cat.get_number_of_events.assert_called_once() - ax.figure.savefig(os.path.join(self.save_dir, "magnitude_histogram.png")) - - def test_plot_magnitude_histogram_ucerf(self): - # Test with basic arguments - ax = plot_magnitude_histogram(self.stochastic_event_sets, self.comcat, - show=show_plots) - - # # Verify that magnitudes were retrieved - # for catalog in self.stochastic_event_sets: - # catalog.get_magnitudes.assert_called_once() - # self.comcat.get_magnitudes.assert_called_once() - # self.comcat.get_number_of_events.assert_called_once() - ax.figure.savefig(os.path.join(self.save_dir, "magnitude_histogram_ucerf.png")) - - def tearDown(self): - plt.close("all") - gc.collect() - - -class TestPlotDistributionTests(TestPlots): - - def setUp(self): - self.result_obs_scalar = MagicMock() - self.result_obs_scalar.test_distribution = numpy.random.normal(0, 1, 1000) - self.result_obs_scalar.observed_statistic = numpy.random.rand(1)[0] - - self.result_obs_array = MagicMock() - self.result_obs_array.test_distribution = numpy.random.normal(0, 1, 1000) - self.result_obs_array.observed_statistic = numpy.random.normal(0, 1, 100) - - self.result_nan = MagicMock() - self.result_nan.test_distribution = numpy.random.normal(0, 1, 1000) - self.result_nan.observed_statistic = -numpy.inf - - # Example data for testing - n_test = os.path.join( - self.artifacts, "example_csep2_forecasts", "Results", "catalog_n_test.json" - ) - s_test = os.path.join( - self.artifacts, "example_csep2_forecasts", "Results", "catalog_s_test.json" - ) - m_test = os.path.join( - self.artifacts, "example_csep2_forecasts", "Results", "catalog_m_test.json" - ) - l_test = os.path.join( - self.artifacts, "example_csep2_forecasts", "Results", "catalog_l_test.json" - ) - - with open(n_test, "r") as fp: - 
self.n_test = CatalogNumberTestResult.from_dict(json.load(fp)) - with open(s_test, "r") as fp: - self.s_test = CatalogSpatialTestResult.from_dict(json.load(fp)) - with open(m_test, "r") as fp: - self.m_test = CatalogMagnitudeTestResult.from_dict(json.load(fp)) - with open(l_test, "r") as fp: - self.l_test = CatalogPseudolikelihoodTestResult.from_dict(json.load(fp)) - - def test_plot_dist_test_with_scalar_observation_default(self): - ax = plot_distribution_test( - evaluation_result=self.result_obs_scalar, - show=show_plots, - ) - - # Check if a vertical line was drawn for the scalar observation - lines = [line for line in ax.get_lines() if line.get_linestyle() == "--"] - self.assertEqual(len(lines), 1) # Expect one vertical line - self.assertEqual(lines[0].get_xdata()[0], self.result_obs_scalar.observed_statistic) - - def test_plot_dist_test_with_scalar_observation_w_labels(self): - ax = plot_distribution_test( - evaluation_result=self.result_obs_scalar, - xlabel="Test X Label", - ylabel="Test Y Label", - title="Test Title", - show=show_plots, - ) - - # Check if a vertical line was drawn for the scalar observation - lines = [line for line in ax.get_lines() if line.get_linestyle() == "--"] - self.assertEqual(len(lines), 1) # Expect one vertical line - self.assertEqual(lines[0].get_xdata()[0], self.result_obs_scalar.observed_statistic) - - def test_plot_dist_test_with_array_observation(self): - ax = plot_distribution_test( - evaluation_result=self.result_obs_array, - alpha=0.5, - show=show_plots, - ) - bars = ax.patches - self.assertTrue( - all(bar.get_alpha() == 0.5 for bar in bars), - "Alpha transparency not set correctly for bars", - ) - - def test_plot_dist_test_with_percentile_shading(self): - ax = plot_distribution_test( - evaluation_result=self.result_obs_scalar, - percentile=60, - show=show_plots, - ) - expected_red = (1.0, 0.0, 0.0) - red_patches = [] - for patch_ in ax.patches: - facecolor = patch_.get_facecolor()[:3] # Get RGB, ignore alpha - if all(abs(facecolor[i] - expected_red[i]) < 0.01 for i in range(3)): - red_patches.append(patch_) - self.assertGreater( - len(red_patches), - 0, - "Expected some patches to be colored red for percentile shading", - ) - - def test_plot_dist_test_with_annotation(self): - annotation_text = "Test Annotation" - ax = plot_distribution_test( - evaluation_result=self.result_obs_scalar, - xlabel="Test X Label", - ylabel="Test Y Label", - title="Test Title", - annotation_text=annotation_text, - annotation_xy=(0.5, 0.5), - annotation_fontsize=12, - show=show_plots, - ) - annotations = ax.texts - self.assertEqual(len(annotations), 1) - self.assertEqual(annotations[0].get_text(), annotation_text) - - def test_plot_dist_test_xlim(self): - xlim = (-5, 5) - ax = plot_distribution_test( - evaluation_result=self.result_obs_scalar, - percentile=95, - xlim=xlim, - show=show_plots, - ) - self.savefig(ax, "plot_dist_test_xlims.png") - self.assertEqual(ax.get_xlim(), xlim) - - def test_plot_dist_test_autoxlim_nan(self): - - ax = plot_distribution_test( - evaluation_result=self.result_nan, - percentile=95, - show=show_plots, - ) - self.savefig(ax, "plot_dist_test_xlims_inf.png") - - def test_plot_n_test(self): - ax = plot_distribution_test( - self.n_test, - show=show_plots, - ) - self.savefig(ax, "plot_n_test.png") - - def test_plot_m_test(self): - ax = plot_distribution_test( - self.m_test, - show=show_plots, - ) - self.savefig(ax, "plot_m_test.png") - - def test_plot_s_test(self): - ax = plot_distribution_test( - self.s_test, - show=show_plots, - ) - 
self.savefig(ax, "plot_s_test.png") - - def test_plot_l_test(self): - ax = plot_distribution_test( - self.l_test, - show=show_plots, - ) - self.savefig(ax, "plot_l_test.png") - - def tearDown(self): - plt.close("all") - gc.collect() - - -class TestPlotCalibrationTest(TestPlots): - - def setUp(self): - # Create a mock evaluation result with a uniform distribution - self.evaluation_result = MagicMock() - self.evaluation_result.test_distribution = numpy.random.uniform(0, 1, 1000) ** 1.3 - self.evaluation_result.sim_name = "Simulated Data" - - # Example data for testing - cal_n_test = os.path.join( - os.path.dirname(__file__), - "artifacts", - "example_csep2_forecasts", - "Results", - "calibration_n.json", - ) - cal_m_test = os.path.join( - os.path.dirname(__file__), - "artifacts", - "example_csep2_forecasts", - "Results", - "calibration_m.json", - ) - - with open(cal_n_test, "r") as fp: - self.cal_n_test = CalibrationTestResult.from_dict(json.load(fp)) - with open(cal_m_test, "r") as fp: - self.cal_m_test = CalibrationTestResult.from_dict(json.load(fp)) - - def test_plot_calibration_basic(self): - # Test with basic arguments - ax = plot_calibration_test(self.evaluation_result, show=show_plots) - # Check if the plot was created - self.assertIsInstance(ax, plt.Axes) - # Check if the confidence intervals were plotted (3 lines: pp, ulow, uhigh) - self.assertEqual(len(ax.lines), 4) - # Check if the legend was created with the correct label - legend = ax.get_legend() - self.assertIsNotNone(legend) - legend_labels = [text.get_text() for text in legend.get_texts()] - self.assertIn(self.evaluation_result.sim_name, legend_labels) - - def test_plot_calibration_test_n_test(self): - - ax = plot_calibration_test(self.cal_n_test, show=show_plots) - self.savefig(ax, "calibration_n_test.png") - legend = ax.get_legend() - self.assertIsNotNone(legend) - legend_labels = [text.get_text() for text in legend.get_texts()] - self.assertIn(self.cal_n_test.sim_name, legend_labels) - - def test_plot_calibration_test_m_test(self): - ax = plot_calibration_test(self.cal_m_test, show=show_plots) - self.savefig(ax, "calibration_m_test.png") - legend = ax.get_legend() - self.assertIsNotNone(legend) - legend_labels = [text.get_text() for text in legend.get_texts()] - self.assertIn(self.cal_m_test.sim_name, legend_labels) - - def tearDown(self): - plt.close("all") - gc.collect() - - -class TestBatchPlots(TestPlots): - def setUp(self): - # Mocking EvaluationResult for testing - self.mock_result = Mock() - self.mock_result.sim_name = "Mock Forecast" - self.mock_result.test_distribution = numpy.random.normal(loc=10, scale=2, size=100) - self.mock_result.observed_statistic = 8 - - def test_plot_consistency_basic(self): - ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots) - self.assertEqual(ax.get_title(), '') - self.assertEqual(ax.get_xlabel(), "Statistic distribution") - - def test_plot_consistency_with_multiple_results(self): - mock_results = [self.mock_result for _ in range(5)] - ax = plot_consistency_test(eval_results=mock_results, show=show_plots) - self.assertEqual(len(ax.get_yticklabels()), 5) - - def test_plot_consistency_with_normalization(self): - ax = plot_consistency_test(eval_results=self.mock_result, normalize=True, - show=show_plots) - # Assert that the observed statistic is plotted at 0 - self.assertEqual(ax.lines[0].get_xdata(), 0) - - def test_plot_consistency_with_one_sided_lower(self): - mock_result = copy.deepcopy(self.mock_result) - # THe observed statistic is placed to the right of the 
model test distribution. - mock_result.observed_statistic = max(self.mock_result.test_distribution) + 1 - ax = plot_consistency_test(eval_results=mock_result, one_sided_lower=True, - show=show_plots) - # The end of the infinite dashed line should extend way away from the plot limit - self.assertGreater(ax.lines[-1].get_xdata()[-1], ax.get_xlim()[1]) - - def test_plot_consistency_with_custom_percentile(self): - ax = plot_consistency_test(eval_results=self.mock_result, percentile=99, - show=show_plots) - - # Check that the line extent equals the lower 0.5 % percentile - self.assertAlmostEqual(ax.lines[2].get_xdata(), - numpy.percentile(self.mock_result.test_distribution, 0.5)) - - def test_plot_consistency_with_variance(self): - mock_nb = copy.deepcopy(self.mock_result) - mock_poisson = copy.deepcopy(self.mock_result) - mock_nb.test_distribution = ('negative_binomial', 8) - mock_poisson.test_distribution = ('poisson', 8) - ax_nb = plot_consistency_test(eval_results=mock_nb, variance=16, show=show_plots) - ax_p = plot_consistency_test(eval_results=mock_poisson, variance=None, show=show_plots) - # Ensure the negative binomial has a larger x-axis extent than poisson - self.assertTrue(ax_p.get_xlim()[1] < ax_nb.get_xlim()[1]) - - def test_plot_consistency_with_custom_plot_args(self): - ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots, - xlabel="Custom X", ylabel="Custom Y", title="Custom Title") - self.assertEqual(ax.get_xlabel(), "Custom X") - self.assertEqual(ax.get_title(), "Custom Title") - - def test_plot_consistency_with_mean(self): - ax = plot_consistency_test(eval_results=self.mock_result, plot_mean=True, - show=show_plots) - # Check for the mean line plotted as a circle - self.assertTrue(any(["o" in str(line.get_marker()) for line in ax.lines])) - - def test_SingleNTestPlot(self): - - expected_val = numpy.random.randint(0, 20) - observed_val = numpy.random.randint(0, 20) - Ntest_result = mock.Mock() - Ntest_result.name = "Mock NTest" - Ntest_result.sim_name = "Mock SimName" - Ntest_result.test_distribution = ["poisson", expected_val] - Ntest_result.observed_statistic = observed_val - matplotlib.pyplot.close() - plot_consistency_test(Ntest_result, show=show_plots) - - if not show_plots: - self.assertEqual( - [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()], - [i.sim_name for i in [Ntest_result]], - ) - self.assertEqual(matplotlib.pyplot.gca().get_title(), '') - - def test_MultiNTestPlot(self): - - n_plots = numpy.random.randint(1, 20) - Ntests = [] - for n in range(n_plots): - Ntest_result = mock.Mock() - Ntest_result.name = "Mock NTest" - Ntest_result.sim_name = "".join( - random.choice(string.ascii_letters) for _ in range(8) - ) - Ntest_result.test_distribution = ["poisson", numpy.random.randint(0, 20)] - Ntest_result.observed_statistic = numpy.random.randint(0, 20) - Ntests.append(Ntest_result) - matplotlib.pyplot.close() - plot_consistency_test(Ntests, show=show_plots) - Ntests.reverse() - if not show_plots: - self.assertEqual( - [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()], - [i.sim_name for i in Ntests], - ) - - def test_MultiSTestPlot(self): - - s_plots = numpy.random.randint(1, 20) - Stests = [] - for n in range(s_plots): - Stest_result = mock.Mock() # Mock class with random attributes - Stest_result.name = "Mock STest" - Stest_result.sim_name = "".join( - random.choice(string.ascii_letters) for _ in range(8) - ) - Stest_result.test_distribution = numpy.random.uniform( - -1000, 0, numpy.random.randint(3, 500) - 
).tolist() - Stest_result.observed_statistic = numpy.random.uniform( - -1000, 0 - ) # random observed statistic - if numpy.random.random() < 0.02: # sim possible infinite values - Stest_result.observed_statistic = -numpy.inf - Stests.append(Stest_result) - matplotlib.pyplot.close() - plot_consistency_test(Stests) - Stests.reverse() - self.assertEqual( - [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()], - [i.sim_name for i in Stests], - ) - - def test_MultiTTestPlot(self): - - for i in range(1): - t_plots = numpy.random.randint(2, 20) - t_tests = [] - - def rand(limit=10, offset=0.): - return limit * (numpy.random.random() - offset) - - for n in range(t_plots): - t_result = mock.Mock() # Mock class with random attributes - t_result.name = "CSEP1 Comparison Test" - t_result.sim_name = ( - "".join(random.choice(string.ascii_letters) for _ in range(8)), - "ref", - ) - t_result.observed_statistic = rand(offset=0.5) - t_result.test_distribution = [ - t_result.observed_statistic - rand(5), - t_result.observed_statistic + rand(5), - ] - - if numpy.random.random() < 0.05: # sim possible infinite values - t_result.observed_statistic = -numpy.inf - t_tests.append(t_result) - matplotlib.pyplot.close() - plot_comparison_test(t_tests, show=show_plots) - t_tests.reverse() - if not show_plots: - self.assertEqual( - [i.get_text() for i in matplotlib.pyplot.gca().get_xticklabels()], - [i.sim_name[0] for i in t_tests[::-1]], - ) - self.assertEqual(matplotlib.pyplot.gca().get_title(), t_tests[0].name) - - def tearDown(self): - plt.close("all") - - gc.collect() - +# class TestPlots(unittest.TestCase): +# +# def __init__(self, *args, **kwargs): +# super().__init__(*args, **kwargs) # Call the parent constructor +# # Define the save directory +# self.artifacts = os.path.join(os.path.dirname(__file__), "artifacts") +# self.save_dir = os.path.join(self.artifacts, "plots") +# os.makedirs(self.save_dir, exist_ok=True) +# +# def savefig(self, ax, name): +# ax.figure.savefig(os.path.join(self.save_dir, name)) +# +# +# class TestTimeSeriesPlots(TestPlots): +# +# def setUp(self): +# # This method is called before each test. +# # Load the stochastic event sets and observation here. +# +# cat_file_m2 = csep.datasets.comcat_example_catalog_fname +# cat_file_m5 = os.path.join( +# self.artifacts, +# "example_csep2_forecasts", +# "Catalog", +# "catalog.json", +# ) +# +# forecast_file = os.path.join( +# self.artifacts, +# "example_csep2_forecasts", +# "Forecasts", +# "ucerf3-landers_short.csv", +# ) +# +# self.stochastic_event_sets = csep.load_catalog_forecast(forecast_file) +# self.observation_m5 = catalogs.CSEPCatalog.load_json(cat_file_m5) +# self.observation_m2 = csep.load_catalog(cat_file_m2) +# +# def test_plot_magnitude_vs_time(self): +# # Basic test +# ax = plot_magnitude_vs_time(catalog=self.observation_m2, show=show_plots) +# self.assertEqual(ax.get_title(), "Magnitude vs. 
Time") +# self.assertEqual(ax.get_xlabel(), "Datetime") +# self.assertEqual(ax.get_ylabel(), "$M$") +# +# # Test with custom color +# ax = plot_magnitude_vs_time(catalog=self.observation_m2, color='red', show=show_plots) +# scatter_color = ax.collections[0].get_facecolor()[0] +# self.assertTrue(all(scatter_color[:3] == (1.0, 0.0, 0.0))) # Check if color is red +# +# # Test with custom marker size +# ax = plot_magnitude_vs_time(catalog=self.observation_m2, size=25, max_size=600, +# show=show_plots) +# scatter_sizes = ax.collections[0].get_sizes() +# func_sizes = _autosize_scatter(self.observation_m2.data["magnitude"], 25, 600, 4) +# numpy.testing.assert_array_almost_equal(scatter_sizes, func_sizes) +# +# # Test with custom alpha +# ax = plot_magnitude_vs_time(catalog=self.observation_m2, alpha=0.5, show=show_plots) +# scatter_alpha = ax.collections[0].get_alpha() +# self.assertEqual(scatter_alpha, 0.5) +# +# # Test with custom marker size power +# ax = plot_magnitude_vs_time(catalog=self.observation_m2, power=6, show=show_plots) +# scatter_sizes = ax.collections[0].get_sizes() +# func_sizes = _autosize_scatter(self.observation_m2.data["magnitude"], 4, 300, 6) +# numpy.testing.assert_array_almost_equal(scatter_sizes, func_sizes) +# # +# # # Test with show=True (just to ensure no errors occur) +# plot_magnitude_vs_time(catalog=self.observation_m2, show=True) +# +# def test_plot_cumulative_events_default(self): +# # Test with default arguments to ensure basic functionality +# ax = plot_cumulative_events_versus_time( +# catalog_forecast=self.stochastic_event_sets, +# observation=self.observation_m5, +# show=show_plots +# ) +# +# self.assertIsNotNone(ax.get_title()) +# self.assertIsNotNone(ax.get_xlabel()) +# self.assertIsNotNone(ax.get_ylabel()) +# +# def test_plot_cumulative_events_hours(self): +# # Test with time_axis set to 'hours' +# ax = plot_cumulative_events_versus_time( +# catalog_forecast=self.stochastic_event_sets, +# observation=self.observation_m5, +# bins=50, +# time_axis="hours", +# xlabel="Hours since Mainshock", +# ylabel="Cumulative Event Count", +# title="Cumulative Event Counts by Hour", +# legend_loc="upper left", +# show=show_plots +# ) +# +# self.assertEqual(ax.get_xlabel(), "Hours since Mainshock") +# self.assertEqual(ax.get_ylabel(), "Cumulative Event Count") +# self.assertEqual(ax.get_title(), "Cumulative Event Counts by Hour") +# +# def test_plot_cumulative_events_different_bins(self): +# # Test with different number of bins +# ax = plot_cumulative_events_versus_time( +# catalog_forecast=self.stochastic_event_sets, +# observation=self.observation_m5, +# bins=200, +# show=show_plots, +# figsize=(12, 8), +# time_axis="days", +# xlabel="Days since Mainshock", +# ylabel="Cumulative Event Count", +# title="Cumulative Event Counts with More Bins", +# legend_loc="best" +# ) +# +# self.assertEqual(ax.get_title(), "Cumulative Event Counts with More Bins") +# self.assertEqual(ax.get_xlabel(), "Days since Mainshock") +# self.assertEqual(ax.get_ylabel(), "Cumulative Event Count") +# +# def test_plot_cumulative_events_custom_legend(self): +# # Test with a custom legend location and size +# ax = plot_cumulative_events_versus_time( +# catalog_forecast=self.stochastic_event_sets, +# observation=self.observation_m5, +# bins=75, +# show=show_plots, +# figsize=(8, 5), +# time_axis="days", +# xlabel="Days since Mainshock", +# ylabel="Cumulative Event Count", +# title="Cumulative Event Counts with Custom Legend", +# legend_loc="lower right", +# legend_fontsize=14 +# ) +# +# 
self.assertEqual(ax.get_legend()._get_loc(), 4) +# self.assertEqual(ax.get_legend().get_texts()[0].get_fontsize(), 14) +# +# def tearDown(self): +# plt.close("all") +# del self.stochastic_event_sets +# del self.observation_m2 +# del self.observation_m5 +# gc.collect() +# +# +# class TestPlotMagnitudeHistogram(TestPlots): +# +# def setUp(self): +# +# def gr_dist(num_events, mag_min=3.0, mag_max=8.0, b_val=1.0): +# U = numpy.random.uniform(0, 1, num_events) +# magnitudes = mag_min - (1.0 / b_val) * numpy.log10(1 - U) +# magnitudes = magnitudes[magnitudes <= mag_max] +# return magnitudes +# +# self.mock_forecast = [MagicMock(), MagicMock(), MagicMock()] +# for i in self.mock_forecast: +# i.get_magnitudes.return_value = gr_dist(5000) +# +# self.mock_cat = MagicMock() +# self.mock_cat.get_magnitudes.return_value = gr_dist(500, b_val=1.2) +# self.mock_cat.get_number_of_events.return_value = 500 +# self.mock_cat.region.magnitudes = numpy.arange(3.0, 8.0, 0.1) +# self.save_dir = os.path.join(os.path.dirname(__file__), "artifacts", "plots") +# +# cat_file_m5 = os.path.join( +# self.artifacts, +# "example_csep2_forecasts", +# "Catalog", +# "catalog.json", +# ) +# self.comcat = catalogs.CSEPCatalog.load_json(cat_file_m5) +# forecast_file = os.path.join( +# self.artifacts, +# "example_csep2_forecasts", +# "Forecasts", +# "ucerf3-landers_short.csv", +# ) +# +# self.stochastic_event_sets = csep.load_catalog_forecast(forecast_file) +# +# os.makedirs(self.save_dir, exist_ok=True) +# +# def test_plot_magnitude_histogram_basic(self): +# # Test with basic arguments +# ax = plot_magnitude_histogram(self.mock_forecast, +# self.mock_cat, show=show_plots, +# density=True) +# +# # Verify that magnitudes were retrieved +# for catalog in self.mock_forecast: +# catalog.get_magnitudes.assert_called_once() +# self.mock_cat.get_magnitudes.assert_called_once() +# self.mock_cat.get_number_of_events.assert_called_once() +# ax.figure.savefig(os.path.join(self.save_dir, "magnitude_histogram.png")) +# +# def test_plot_magnitude_histogram_ucerf(self): +# # Test with basic arguments +# ax = plot_magnitude_histogram(self.stochastic_event_sets, self.comcat, +# show=show_plots) +# +# # # Verify that magnitudes were retrieved +# # for catalog in self.stochastic_event_sets: +# # catalog.get_magnitudes.assert_called_once() +# # self.comcat.get_magnitudes.assert_called_once() +# # self.comcat.get_number_of_events.assert_called_once() +# ax.figure.savefig(os.path.join(self.save_dir, "magnitude_histogram_ucerf.png")) +# +# def tearDown(self): +# plt.close("all") +# gc.collect() +# +# +# class TestPlotDistributionTests(TestPlots): +# +# def setUp(self): +# self.result_obs_scalar = MagicMock() +# self.result_obs_scalar.test_distribution = numpy.random.normal(0, 1, 1000) +# self.result_obs_scalar.observed_statistic = numpy.random.rand(1)[0] +# +# self.result_obs_array = MagicMock() +# self.result_obs_array.test_distribution = numpy.random.normal(0, 1, 1000) +# self.result_obs_array.observed_statistic = numpy.random.normal(0, 1, 100) +# +# self.result_nan = MagicMock() +# self.result_nan.test_distribution = numpy.random.normal(0, 1, 1000) +# self.result_nan.observed_statistic = -numpy.inf +# +# # Example data for testing +# n_test = os.path.join( +# self.artifacts, "example_csep2_forecasts", "Results", "catalog_n_test.json" +# ) +# s_test = os.path.join( +# self.artifacts, "example_csep2_forecasts", "Results", "catalog_s_test.json" +# ) +# m_test = os.path.join( +# self.artifacts, "example_csep2_forecasts", "Results", "catalog_m_test.json" 
+# ) +# l_test = os.path.join( +# self.artifacts, "example_csep2_forecasts", "Results", "catalog_l_test.json" +# ) +# +# with open(n_test, "r") as fp: +# self.n_test = CatalogNumberTestResult.from_dict(json.load(fp)) +# with open(s_test, "r") as fp: +# self.s_test = CatalogSpatialTestResult.from_dict(json.load(fp)) +# with open(m_test, "r") as fp: +# self.m_test = CatalogMagnitudeTestResult.from_dict(json.load(fp)) +# with open(l_test, "r") as fp: +# self.l_test = CatalogPseudolikelihoodTestResult.from_dict(json.load(fp)) +# +# def test_plot_dist_test_with_scalar_observation_default(self): +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_scalar, +# show=show_plots, +# ) +# +# # Check if a vertical line was drawn for the scalar observation +# lines = [line for line in ax.get_lines() if line.get_linestyle() == "--"] +# self.assertEqual(len(lines), 1) # Expect one vertical line +# self.assertEqual(lines[0].get_xdata()[0], self.result_obs_scalar.observed_statistic) +# +# def test_plot_dist_test_with_scalar_observation_w_labels(self): +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_scalar, +# xlabel="Test X Label", +# ylabel="Test Y Label", +# title="Test Title", +# show=show_plots, +# ) +# +# # Check if a vertical line was drawn for the scalar observation +# lines = [line for line in ax.get_lines() if line.get_linestyle() == "--"] +# self.assertEqual(len(lines), 1) # Expect one vertical line +# self.assertEqual(lines[0].get_xdata()[0], self.result_obs_scalar.observed_statistic) +# +# def test_plot_dist_test_with_array_observation(self): +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_array, +# alpha=0.5, +# show=show_plots, +# ) +# bars = ax.patches +# self.assertTrue( +# all(bar.get_alpha() == 0.5 for bar in bars), +# "Alpha transparency not set correctly for bars", +# ) +# +# def test_plot_dist_test_with_percentile_shading(self): +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_scalar, +# percentile=60, +# show=show_plots, +# ) +# expected_red = (1.0, 0.0, 0.0) +# red_patches = [] +# for patch_ in ax.patches: +# facecolor = patch_.get_facecolor()[:3] # Get RGB, ignore alpha +# if all(abs(facecolor[i] - expected_red[i]) < 0.01 for i in range(3)): +# red_patches.append(patch_) +# self.assertGreater( +# len(red_patches), +# 0, +# "Expected some patches to be colored red for percentile shading", +# ) +# +# def test_plot_dist_test_with_annotation(self): +# annotation_text = "Test Annotation" +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_scalar, +# xlabel="Test X Label", +# ylabel="Test Y Label", +# title="Test Title", +# annotation_text=annotation_text, +# annotation_xy=(0.5, 0.5), +# annotation_fontsize=12, +# show=show_plots, +# ) +# annotations = ax.texts +# self.assertEqual(len(annotations), 1) +# self.assertEqual(annotations[0].get_text(), annotation_text) +# +# def test_plot_dist_test_xlim(self): +# xlim = (-5, 5) +# ax = plot_distribution_test( +# evaluation_result=self.result_obs_scalar, +# percentile=95, +# xlim=xlim, +# show=show_plots, +# ) +# self.savefig(ax, "plot_dist_test_xlims.png") +# self.assertEqual(ax.get_xlim(), xlim) +# +# def test_plot_dist_test_autoxlim_nan(self): +# +# ax = plot_distribution_test( +# evaluation_result=self.result_nan, +# percentile=95, +# show=show_plots, +# ) +# self.savefig(ax, "plot_dist_test_xlims_inf.png") +# +# def test_plot_n_test(self): +# ax = plot_distribution_test( +# self.n_test, +# show=show_plots, +# ) +# self.savefig(ax, 
"plot_n_test.png") +# +# def test_plot_m_test(self): +# ax = plot_distribution_test( +# self.m_test, +# show=show_plots, +# ) +# self.savefig(ax, "plot_m_test.png") +# +# def test_plot_s_test(self): +# ax = plot_distribution_test( +# self.s_test, +# show=show_plots, +# ) +# self.savefig(ax, "plot_s_test.png") +# +# def test_plot_l_test(self): +# ax = plot_distribution_test( +# self.l_test, +# show=show_plots, +# ) +# self.savefig(ax, "plot_l_test.png") +# +# def tearDown(self): +# plt.close("all") +# gc.collect() +# +# +# class TestPlotCalibrationTest(TestPlots): +# +# def setUp(self): +# # Create a mock evaluation result with a uniform distribution +# self.evaluation_result = MagicMock() +# self.evaluation_result.test_distribution = numpy.random.uniform(0, 1, 1000) ** 1.3 +# self.evaluation_result.sim_name = "Simulated Data" +# +# # Example data for testing +# cal_n_test = os.path.join( +# os.path.dirname(__file__), +# "artifacts", +# "example_csep2_forecasts", +# "Results", +# "calibration_n.json", +# ) +# cal_m_test = os.path.join( +# os.path.dirname(__file__), +# "artifacts", +# "example_csep2_forecasts", +# "Results", +# "calibration_m.json", +# ) +# +# with open(cal_n_test, "r") as fp: +# self.cal_n_test = CalibrationTestResult.from_dict(json.load(fp)) +# with open(cal_m_test, "r") as fp: +# self.cal_m_test = CalibrationTestResult.from_dict(json.load(fp)) +# +# def test_plot_calibration_basic(self): +# # Test with basic arguments +# ax = plot_calibration_test(self.evaluation_result, show=show_plots) +# # Check if the plot was created +# self.assertIsInstance(ax, plt.Axes) +# # Check if the confidence intervals were plotted (3 lines: pp, ulow, uhigh) +# self.assertEqual(len(ax.lines), 4) +# # Check if the legend was created with the correct label +# legend = ax.get_legend() +# self.assertIsNotNone(legend) +# legend_labels = [text.get_text() for text in legend.get_texts()] +# self.assertIn(self.evaluation_result.sim_name, legend_labels) +# +# def test_plot_calibration_test_n_test(self): +# +# ax = plot_calibration_test(self.cal_n_test, show=show_plots) +# self.savefig(ax, "calibration_n_test.png") +# legend = ax.get_legend() +# self.assertIsNotNone(legend) +# legend_labels = [text.get_text() for text in legend.get_texts()] +# self.assertIn(self.cal_n_test.sim_name, legend_labels) +# +# def test_plot_calibration_test_m_test(self): +# ax = plot_calibration_test(self.cal_m_test, show=show_plots) +# self.savefig(ax, "calibration_m_test.png") +# legend = ax.get_legend() +# self.assertIsNotNone(legend) +# legend_labels = [text.get_text() for text in legend.get_texts()] +# self.assertIn(self.cal_m_test.sim_name, legend_labels) +# +# def tearDown(self): +# plt.close("all") +# gc.collect() +# +# +# class TestBatchPlots(TestPlots): +# def setUp(self): +# # Mocking EvaluationResult for testing +# self.mock_result = Mock() +# self.mock_result.sim_name = "Mock Forecast" +# self.mock_result.test_distribution = numpy.random.normal(loc=10, scale=2, size=100) +# self.mock_result.observed_statistic = 8 +# +# def test_plot_consistency_basic(self): +# ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots) +# self.assertEqual(ax.get_title(), '') +# self.assertEqual(ax.get_xlabel(), "Statistic distribution") +# +# def test_plot_consistency_with_multiple_results(self): +# mock_results = [self.mock_result for _ in range(5)] +# ax = plot_consistency_test(eval_results=mock_results, show=show_plots) +# self.assertEqual(len(ax.get_yticklabels()), 5) +# +# def 
test_plot_consistency_with_normalization(self): +# ax = plot_consistency_test(eval_results=self.mock_result, normalize=True, +# show=show_plots) +# # Assert that the observed statistic is plotted at 0 +# self.assertEqual(ax.lines[0].get_xdata(), 0) +# +# def test_plot_consistency_with_one_sided_lower(self): +# mock_result = copy.deepcopy(self.mock_result) +# # THe observed statistic is placed to the right of the model test distribution. +# mock_result.observed_statistic = max(self.mock_result.test_distribution) + 1 +# ax = plot_consistency_test(eval_results=mock_result, one_sided_lower=True, +# show=show_plots) +# # The end of the infinite dashed line should extend way away from the plot limit +# self.assertGreater(ax.lines[-1].get_xdata()[-1], ax.get_xlim()[1]) +# +# def test_plot_consistency_with_custom_percentile(self): +# ax = plot_consistency_test(eval_results=self.mock_result, percentile=99, +# show=show_plots) +# +# # Check that the line extent equals the lower 0.5 % percentile +# self.assertAlmostEqual(ax.lines[2].get_xdata(), +# numpy.percentile(self.mock_result.test_distribution, 0.5)) +# +# def test_plot_consistency_with_variance(self): +# mock_nb = copy.deepcopy(self.mock_result) +# mock_poisson = copy.deepcopy(self.mock_result) +# mock_nb.test_distribution = ('negative_binomial', 8) +# mock_poisson.test_distribution = ('poisson', 8) +# ax_nb = plot_consistency_test(eval_results=mock_nb, variance=16, show=show_plots) +# ax_p = plot_consistency_test(eval_results=mock_poisson, variance=None, show=show_plots) +# # Ensure the negative binomial has a larger x-axis extent than poisson +# self.assertTrue(ax_p.get_xlim()[1] < ax_nb.get_xlim()[1]) +# +# def test_plot_consistency_with_custom_plot_args(self): +# ax = plot_consistency_test(eval_results=self.mock_result, show=show_plots, +# xlabel="Custom X", ylabel="Custom Y", title="Custom Title") +# self.assertEqual(ax.get_xlabel(), "Custom X") +# self.assertEqual(ax.get_title(), "Custom Title") +# +# def test_plot_consistency_with_mean(self): +# ax = plot_consistency_test(eval_results=self.mock_result, plot_mean=True, +# show=show_plots) +# # Check for the mean line plotted as a circle +# self.assertTrue(any(["o" in str(line.get_marker()) for line in ax.lines])) +# +# def test_SingleNTestPlot(self): +# +# expected_val = numpy.random.randint(0, 20) +# observed_val = numpy.random.randint(0, 20) +# Ntest_result = mock.Mock() +# Ntest_result.name = "Mock NTest" +# Ntest_result.sim_name = "Mock SimName" +# Ntest_result.test_distribution = ["poisson", expected_val] +# Ntest_result.observed_statistic = observed_val +# matplotlib.pyplot.close() +# plot_consistency_test(Ntest_result, show=show_plots) +# +# if not show_plots: +# self.assertEqual( +# [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()], +# [i.sim_name for i in [Ntest_result]], +# ) +# self.assertEqual(matplotlib.pyplot.gca().get_title(), '') +# +# def test_MultiNTestPlot(self): +# +# n_plots = numpy.random.randint(1, 20) +# Ntests = [] +# for n in range(n_plots): +# Ntest_result = mock.Mock() +# Ntest_result.name = "Mock NTest" +# Ntest_result.sim_name = "".join( +# random.choice(string.ascii_letters) for _ in range(8) +# ) +# Ntest_result.test_distribution = ["poisson", numpy.random.randint(0, 20)] +# Ntest_result.observed_statistic = numpy.random.randint(0, 20) +# Ntests.append(Ntest_result) +# matplotlib.pyplot.close() +# plot_consistency_test(Ntests, show=show_plots) +# Ntests.reverse() +# if not show_plots: +# self.assertEqual( +# [i.get_text() for i in 
matplotlib.pyplot.gca().get_yticklabels()], +# [i.sim_name for i in Ntests], +# ) +# +# def test_MultiSTestPlot(self): +# +# s_plots = numpy.random.randint(1, 20) +# Stests = [] +# for n in range(s_plots): +# Stest_result = mock.Mock() # Mock class with random attributes +# Stest_result.name = "Mock STest" +# Stest_result.sim_name = "".join( +# random.choice(string.ascii_letters) for _ in range(8) +# ) +# Stest_result.test_distribution = numpy.random.uniform( +# -1000, 0, numpy.random.randint(3, 500) +# ).tolist() +# Stest_result.observed_statistic = numpy.random.uniform( +# -1000, 0 +# ) # random observed statistic +# if numpy.random.random() < 0.02: # sim possible infinite values +# Stest_result.observed_statistic = -numpy.inf +# Stests.append(Stest_result) +# matplotlib.pyplot.close() +# plot_consistency_test(Stests) +# Stests.reverse() +# self.assertEqual( +# [i.get_text() for i in matplotlib.pyplot.gca().get_yticklabels()], +# [i.sim_name for i in Stests], +# ) +# +# def test_MultiTTestPlot(self): +# +# for i in range(1): +# t_plots = numpy.random.randint(2, 20) +# t_tests = [] +# +# def rand(limit=10, offset=0.): +# return limit * (numpy.random.random() - offset) +# +# for n in range(t_plots): +# t_result = mock.Mock() # Mock class with random attributes +# t_result.name = "CSEP1 Comparison Test" +# t_result.sim_name = ( +# "".join(random.choice(string.ascii_letters) for _ in range(8)), +# "ref", +# ) +# t_result.observed_statistic = rand(offset=0.5) +# t_result.test_distribution = [ +# t_result.observed_statistic - rand(5), +# t_result.observed_statistic + rand(5), +# ] +# +# if numpy.random.random() < 0.05: # sim possible infinite values +# t_result.observed_statistic = -numpy.inf +# t_tests.append(t_result) +# matplotlib.pyplot.close() +# plot_comparison_test(t_tests, show=show_plots) +# t_tests.reverse() +# if not show_plots: +# self.assertEqual( +# [i.get_text() for i in matplotlib.pyplot.gca().get_xticklabels()], +# [i.sim_name[0] for i in t_tests[::-1]], +# ) +# self.assertEqual(matplotlib.pyplot.gca().get_title(), t_tests[0].name) +# +# def tearDown(self): +# plt.close("all") +# +# gc.collect() +# class TestPlotBasemap(TestPlots):