Coverage for NeuralTSNE/NeuralTSNE/TSNE/tests/test_cost_functions.py: 100% (10 statements)
coverage.py v7.8.0, created at 2025-05-18 16:32 +0000
import pytest
import torch
from NeuralTSNE.TSNE.CostFunctions import CostFunctions

from NeuralTSNE.TSNE.tests.fixtures.parametric_tsne_fixtures import (
    default_parametric_tsne_instance,
)


@pytest.mark.parametrize(
    "P, Q, expected",
    [
        (
            torch.tensor([[0.1, 0.4, 0.5], [0.3, 0.25, 0.45], [0.26, 0.24, 0.5]]),
            torch.tensor([[0.8, 0.15, 0.05], [0.1, 0.5, 0.4], [0.3, 0.4, 0.4]]),
            torch.tensor(29.623),
        ),
        (
            torch.tensor(
                [
                    [0.0000, 0.0592, 0.0651, 0.0372, 0.0588],
                    [0.0592, 0.0000, 0.0528, 0.0382, 0.0533],
                    [0.0651, 0.0528, 0.0000, 0.0444, 0.0465],
                    [0.0372, 0.0382, 0.0444, 0.0000, 0.0446],
                    [0.0588, 0.0533, 0.0465, 0.0446, 0.0000],
                ]
            ),
            torch.tensor(
                [
                    [0.4781, 0.7788],
                    [0.8525, 0.3280],
                    [0.0730, 0.9723],
                    [0.0679, 0.1797],
                    [0.5947, 0.5116],
                ]
            ),
            torch.tensor(0.0154),
        ),
    ],
)
def test_kl_divergence(
    default_parametric_tsne_instance,
    P: torch.Tensor,
    Q: torch.Tensor,
    expected: torch.Tensor,
):
    # Check CostFunctions.kl_divergence against precomputed reference values;
    # the function is called as (Q, P, params) with the batch size taken from P.
    tsne_instance, _ = default_parametric_tsne_instance
    tsne_instance.batch_size = P.shape[0]
    C = CostFunctions.kl_divergence(Q, P, {"device": "cpu", "batch_size": P.shape[0]})

    assert torch.allclose(C, expected, rtol=1e-3)
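
For orientation, below is a minimal sketch of the textbook KL-divergence objective this test exercises, C = sum_ij P_ij * log(P_ij / Q_ij). It is an illustration only: the body of CostFunctions.kl_divergence is not shown on this page, and judging by the second parametrization (a 5x2 matrix passed as Q) the library likely builds the low-dimensional affinities from the embedding and applies its own batch-size scaling, so this sketch will not reproduce the expected constants above. The name kl_divergence_reference and the eps guard are assumptions introduced here.

import torch


def kl_divergence_reference(
    P: torch.Tensor, Q: torch.Tensor, eps: float = 1e-12
) -> torch.Tensor:
    # Textbook KL(P || Q) = sum_ij P_ij * log(P_ij / Q_ij).
    # NOTE: sketch only; the library version may normalize or scale differently.
    # eps guards against log(0); entries with P_ij == 0 contribute essentially nothing.
    return torch.sum(P * torch.log((P + eps) / (Q + eps)))

Implementations commonly clamp or offset P and Q by a small constant so that the zeroed diagonal of the affinity matrices does not produce log(0); the eps term here plays that role.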