The course material for Deploying a Model for Inference at Production Scale is outdated and does not work anymore with the models supplied by HF.
Tried to adapt the code to fix the model routing in HF, but there's a dependency problem: Python 3.8 is too old for the new version of the HF library (installed via %pip install --upgrade transformers huggingface_hub).
If this is not solved, I would like a voucher back to get another course that I could finish properly.
import torch
from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Load the tokenizer for the zero-shot NLI model used throughout this cell.
# NOTE(review): the commented-out variant that carried a hard-coded Hugging Face
# access token was removed — never paste secrets into code or posts; revoke that token.
R_tokenizer = AutoTokenizer.from_pretrained('joeddav/xlm-roberta-large-xnli')

# Premise/hypothesis pair for NLI-style zero-shot topic classification.
premise = "Jupiter's Biggest Moons Started as Tiny Grains of Hail"
hypothesis = 'This text is about space & cosmos'

# Tokenize to a fixed length so the traced TorchScript graph sees a static
# input shape. truncation=True is required: the original call padded but never
# truncated, so an overlong pair would break the fixed (1, 256) shape.
encoded = R_tokenizer(
    premise,
    hypothesis,
    return_tensors='pt',
    max_length=256,
    padding='max_length',
    truncation=True,
)
input_ids = encoded['input_ids']
# Take the attention mask from the tokenizer instead of the hand-rolled
# `input_ids != 1` test, which hard-coded XLM-R's pad token id.
mask = encoded['attention_mask'].long()
class PyTorch_to_TorchScript(torch.nn.Module):
    """Wrap the HF sequence-classification model so it can be traced to TorchScript.

    Moves inputs onto the GPU inside forward() and returns the model's raw
    tuple output so torch.jit.trace can record the graph.
    """

    def __init__(self):
        super().__init__()
        # return_dict=False makes forward() return a plain tuple —
        # torch.jit.trace cannot trace the ModelOutput dict the default
        # returns. (The original dropped this flag when switching to
        # AutoModelForSequenceClassification; restored here. The removed
        # commented-out line also leaked an HF access token — revoke it.)
        self.model = AutoModelForSequenceClassification.from_pretrained(
            'joeddav/xlm-roberta-large-xnli', return_dict=False)

    def forward(self, data, attention_mask=None):
        # Guard the default: the original unconditionally called
        # attention_mask.cuda(), which raises AttributeError when the
        # parameter is left as None.
        if attention_mask is not None:
            attention_mask = attention_mask.cuda()
        return self.model(data.cuda(), attention_mask)
from pathlib import Path

# Trace with the fixed-shape example inputs prepared above; the resulting
# graph is specialized to that (1, 256) shape.
pt_model = PyTorch_to_TorchScript().eval().cuda()
traced_script_module = torch.jit.trace(pt_model, (input_ids, mask))

# Triton expects <repo>/<model-name>/<version>/model.pt. Create the version
# directory first — the original save() fails outright if the path is missing.
model_dir = Path('models/huggingface-model/1')
model_dir.mkdir(parents=True, exist_ok=True)
traced_script_module.save(str(model_dir / 'model.pt'))
outputs:
ImportError Traceback (most recent call last)
/opt/conda/lib/python3.8/site-packages/transformers/utils/import_utils.py in _get_module(self, module_name)
1777 try:
→ 1778 return importlib.import_module("." + module_name, self.__name__)
1779 except Exception as e:
/opt/conda/lib/python3.8/importlib/__init__.py in import_module(name, package)
126 level += 1
→ 127 return _bootstrap._gcd_import(name[level:], package, level)
128
/opt/conda/lib/python3.8/importlib/_bootstrap.py in _gcd_import(name, package, level)
/opt/conda/lib/python3.8/importlib/_bootstrap.py in _find_and_load(name, import_)
/opt/conda/lib/python3.8/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
/opt/conda/lib/python3.8/importlib/_bootstrap.py in _load_unlocked(spec)
/opt/conda/lib/python3.8/importlib/_bootstrap_external.py in exec_module(self, module)
/opt/conda/lib/python3.8/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
/opt/conda/lib/python3.8/site-packages/transformers/models/xlm_roberta/modeling_xlm_roberta.py in
42 )
—> 43 from …modeling_utils import PreTrainedModel
44 from …pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
/opt/conda/lib/python3.8/site-packages/transformers/modeling_utils.py in
47 from .integrations import PeftAdapterMixin, deepspeed_config, is_deepspeed_zero3_enabled
—> 48 from .loss.loss_utils import LOSS_MAPPING
49 from .pytorch_utils import ( # noqa: F401
/opt/conda/lib/python3.8/site-packages/transformers/loss/loss_utils.py in
18
—> 19 from .loss_deformable_detr import DeformableDetrForObjectDetectionLoss, DeformableDetrForSegmentationLoss
20 from .loss_for_object_detection import ForObjectDetectionLoss, ForSegmentationLoss
/opt/conda/lib/python3.8/site-packages/transformers/loss/loss_deformable_detr.py in
3
----> 4 from ..image_transforms import center_to_corners_format
5 from ..utils import is_scipy_available
/opt/conda/lib/python3.8/site-packages/transformers/image_transforms.py in
21
—> 22 from .image_utils import (
23 ChannelDimension,
/opt/conda/lib/python3.8/site-packages/transformers/image_utils.py in
57 if is_torchvision_available():
—> 58 from torchvision.transforms import InterpolationMode
59
ImportError: cannot import name 'InterpolationMode' from 'torchvision.transforms' (/opt/conda/lib/python3.8/site-packages/torchvision/transforms/__init__.py)
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
in
1 import torch
----> 2 from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer
3
4 from transformers import AutoTokenizer, AutoModelForSequenceClassification
5
/opt/conda/lib/python3.8/importlib/_bootstrap.py in handle_fromlist(module, fromlist, import, recursive)
/opt/conda/lib/python3.8/site-packages/transformers/utils/import_utils.py in getattr(self, name)
1765 elif name in self._class_to_module.keys():
1766 module = self._get_module(self._class_to_module[name])
→ 1767 value = getattr(module, name)
1768 elif name in self._modules:
1769 value = self._get_module(name)
/opt/conda/lib/python3.8/site-packages/transformers/utils/import_utils.py in getattr(self, name)
1764 value = Placeholder
1765 elif name in self._class_to_module.keys():
→ 1766 module = self._get_module(self._class_to_module[name])
1767 value = getattr(module, name)
1768 elif name in self._modules:
/opt/conda/lib/python3.8/site-packages/transformers/utils/import_utils.py in _get_module(self, module_name)
1778 return importlib.import_module("." + module_name, self.__name__)
1779 except Exception as e:
→ 1780 raise RuntimeError(
1781 f"Failed to import {self.name}.{module_name} because of the following error (look up to see its"
1782 f" traceback):\n{e}"
RuntimeError: Failed to import transformers.models.xlm_roberta.modeling_xlm_roberta because of the following error (look up to see its traceback):
cannot import name 'InterpolationMode' from 'torchvision.transforms' (/opt/conda/lib/python3.8/site-packages/torchvision/transforms/__init__.py)