Divergence domain adaptation methods.

This example illustrates the DeepCoral method from [1] on a simple image classification task.

[1] Baochen Sun and Kate Saenko. Deep CORAL: Correlation Alignment for Deep Domain Adaptation. ECCV 2016 Workshops.
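Before using the high-level API, it helps to see what DeepCoral optimizes. Below is a minimal, illustrative PyTorch sketch of the CORAL loss from [1]: it penalizes the squared Frobenius distance between the covariance matrices of source and target features. This is a plain reimplementation for exposition, not skada's internal code; the name coral_loss is ours.

import torch

def coral_loss(source_features, target_features):
    # Center each batch, then form empirical covariance matrices
    d = source_features.size(1)
    xs = source_features - source_features.mean(dim=0, keepdim=True)
    xt = target_features - target_features.mean(dim=0, keepdim=True)
    cov_s = xs.T @ xs / (xs.size(0) - 1)
    cov_t = xt.T @ xt / (xt.size(0) - 1)
    # Squared Frobenius norm of the covariance gap, scaled as in [1]
    return ((cov_s - cov_t) ** 2).sum() / (4 * d * d)

xs, xt = torch.randn(128, 64), torch.randn(96, 64)
print(coral_loss(xs, xt))  # small for batches drawn from the same distribution

DeepCoral adds this term, weighted by a regularization parameter, to the usual classification loss on the source samples.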

# Author: Théo Gnassounou
#
# License: BSD 3-Clause
# sphinx_gallery_thumbnail_number = 4
from skorch import NeuralNetClassifier
from torch import nn

from skada.datasets import load_mnist_usps
from skada.deep import DeepCoral
from skada.deep.modules import MNISTtoUSPSNet

Load the image datasets

We load a reduced MNIST/USPS pair (n_classes=2 keeps two digit classes, n_samples shrinks the dataset) so the example runs quickly. The training pack masks the USPS labels (mask_target_labels=True), so the adaptation method only sees unlabeled target images; the test pack keeps the labels for evaluation.

dataset = load_mnist_usps(n_classes=2, n_samples=0.5, return_dataset=True)
X, y, sample_domain = dataset.pack(
    as_sources=["mnist"], as_targets=["usps"], mask_target_labels=True
)
X_test, y_test, sample_domain_test = dataset.pack(
    as_sources=[], as_targets=["usps"], mask_target_labels=False
)
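A quick sanity check of what pack() returned. This assumes skada's convention, relied on below via sample_domain > 0, that source samples carry positive domain ids and target samples negative ones:

import numpy as np

# Positive domain ids mark MNIST (source), negative ids mark USPS (target)
print("domains:", np.unique(sample_domain))
print("source samples:", int((sample_domain > 0).sum()))
print("target samples:", int((sample_domain < 0).sum()))
# With mask_target_labels=True the target labels are replaced by a mask value
print("labels in y:", np.unique(y))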

Train a classic model (source-only baseline, no adaptation)

model = NeuralNetClassifier(
    MNISTtoUSPSNet(),
    criterion=nn.CrossEntropyLoss(),
    batch_size=128,
    max_epochs=5,
    train_split=False,
    lr=1e-2,
)
# Fit on the source samples only (positive sample_domain ids)
model.fit(X[sample_domain > 0], y[sample_domain > 0])
model.score(X_test, y=y_test)
  epoch    train_loss     dur
-------  ------------  ------
      1        1.5367  5.1982
      2        0.3111  4.7037
      3        0.1083  4.8984
      4        0.0588  4.8945
      5        0.0408  5.1000

0.9163987138263665
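Without any adaptation, the source-only baseline already reaches about 91.6% accuracy on the USPS test set; this sets the reference point for DeepCoral below.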

Train a DeepCoral model
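DeepCoral trains jointly on the labeled source samples and the unlabeled target images: on top of the classification loss, it adds the CORAL penalty on the activations of the layer selected by layer_name (here fc1), weighted by reg.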

model = DeepCoral(
    MNISTtoUSPSNet(),
    layer_name="fc1",  # layer whose activations are aligned by the CORAL loss
    batch_size=128,
    max_epochs=5,
    train_split=False,
    reg=1,  # weight of the CORAL term relative to the classification loss
    lr=1e-2,
)
model.fit(X, y, sample_domain=sample_domain)
model.score(X_test, y_test, sample_domain=sample_domain_test)
  epoch    train_loss      dur
-------  ------------  -------
      1        1.6283  10.1582
      2        0.3672  9.9996
      3        0.1345  10.1960
      4        0.0843  10.2018
      5        0.0702  9.7004

0.909967845659164
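On this reduced two-class problem the two scores end up close (0.916 source-only vs. 0.910 with DeepCoral); with only five epochs and a small subset of the data, the example demonstrates the API rather than the method's best-case gains. For a per-class view, the predictions can be turned into a confusion matrix. A small sketch, assuming predict accepts sample_domain the same way score does:

from sklearn.metrics import confusion_matrix

# Class predictions for the target test set
y_pred = model.predict(X_test, sample_domain=sample_domain_test)
print(confusion_matrix(y_test, y_pred))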

Total running time of the script: (1 minute 23.974 seconds)
