Cannot complete Lab 3: import error

I checked the environment config (instance type ml.m5.2xlarge). I got an error in the second step, during the imports:

from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM, GenerationConfig
from datasets import load_dataset
from peft import PeftModel, PeftConfig, LoraConfig, TaskType

# trl: Transformer Reinforcement Learning library

from trl import PPOTrainer, PPOConfig, AutoModelForSeq2SeqLMWithValueHead
from trl import create_reference_model
from trl.core import LengthSampler

import torch
import evaluate

import numpy as np
import pandas as pd

# tqdm library makes the loops show a smart progress meter.

from tqdm import tqdm
tqdm.pandas()


ModuleNotFoundError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/transformers/utils/import_utils.py in _get_module(self, module_name)
1125 try:
-> 1126 return importlib.import_module("." + module_name, self.__name__)
1127 except Exception as e:

/opt/conda/lib/python3.7/importlib/__init__.py in import_module(name, package)
126 level += 1
--> 127 return _bootstrap._gcd_import(name[level:], package, level)
128

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _gcd_import(name, package, level)

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _find_and_load(name, import_)

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _load_unlocked(spec)

/opt/conda/lib/python3.7/importlib/_bootstrap_external.py in exec_module(self, module)

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)

/opt/conda/lib/python3.7/site-packages/transformers/pipelines/__init__.py in <module>
44 )
---> 45 from .audio_classification import AudioClassificationPipeline
46 from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline

/opt/conda/lib/python3.7/site-packages/transformers/pipelines/audio_classification.py in <module>
20 from ..utils import add_end_docstrings, is_torch_available, logging
---> 21 from .base import PIPELINE_INIT_ARGS, Pipeline
22

/opt/conda/lib/python3.7/site-packages/transformers/pipelines/base.py in <module>
35 from ..image_processing_utils import BaseImageProcessor
---> 36 from ..modelcard import ModelCard
37 from ..models.auto.configuration_auto import AutoConfig

/opt/conda/lib/python3.7/site-packages/transformers/modelcard.py in <module>
47 )
---> 48 from .training_args import ParallelMode
49 from .utils import (

/opt/conda/lib/python3.7/site-packages/transformers/training_args.py in <module>
59 import torch
---> 60 import torch.distributed as dist
61

ModuleNotFoundError: No module named 'torch.distributed'

The above exception was the direct cause of the following exception:

RuntimeError Traceback (most recent call last)
in <module>
----> 1 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM, GenerationConfig
2 from datasets import load_dataset
3 from peft import PeftModel, PeftConfig, LoraConfig, TaskType
4
5 # trl: Transformer Reinforcement Learning library

/opt/conda/lib/python3.7/importlib/_bootstrap.py in _handle_fromlist(module, fromlist, import_, recursive)

/opt/conda/lib/python3.7/site-packages/transformers/utils/import_utils.py in __getattr__(self, name)
1114 value = self._get_module(name)
1115 elif name in self._class_to_module.keys():
-> 1116 module = self._get_module(self._class_to_module[name])
1117 value = getattr(module, name)
1118 else:

/opt/conda/lib/python3.7/site-packages/transformers/utils/import_utils.py in _get_module(self, module_name)
1129 f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
1130 f" traceback):\n{e}"
-> 1131 ) from e
1132
1133 def __reduce__(self):

RuntimeError: Failed to import transformers.pipelines because of the following error (look up to see its traceback):
No module named 'torch.distributed'
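
For reference, this is a quick way to check whether the kernel's PyTorch build actually provides the torch.distributed submodule that the traceback complains about (just a minimal diagnostic sketch, not part of the lab code):

import importlib.util
import torch

print(torch.__version__)   # which torch build the kernel is using
print(torch.__file__)      # where that build is installed
# prints False if torch.distributed is missing from this build,
# which would explain the ModuleNotFoundError above
print(importlib.util.find_spec("torch.distributed") is not None)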

I attached the Jupyter file.
{Moderator edit: Removed attached file, sharing your code is not allowed by the Code of Conduct. Mentors will contact you if we need to see your code.}

If you haven't changed anything in the lab, I would try closing the page and reopening it. I get the impression that with AWS there are sometimes server issues!
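
If reopening doesn't help, another thing you could try (just a sketch, assuming the lab environment lets you install packages and doesn't pin a specific torch version) is reinstalling torch from inside the notebook and then restarting the kernel:

%pip install --force-reinstall torch
# after the install finishes, restart the kernel (Kernel -> Restart) and re-run the import cell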