

Information Extraction

13. Transformers 2 [lab]

Jakub Pokrywka (2021)


Attention visualization

!pip install bertviz
from transformers import AutoTokenizer, AutoModel
from bertviz import model_view, head_view
TEXT = "This is a sample input sentence for a transformer model"
MODEL = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL, output_attentions=True)
inputs = tokenizer.encode(TEXT, return_tensors='pt')
outputs = model(inputs)
attention = outputs.attentions  # tuple with one attention tensor per layer
tokens = tokenizer.convert_ids_to_tokens(inputs[0])
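head_view and model_view expect a tuple of per-layer attention tensors. A quick sanity check of what the model actually returned (for distilbert-base-uncased: 6 layers, 12 heads):

# Each element of the attentions tuple is one layer,
# with shape [batch, num_heads, seq_len, seq_len]
print(f"layers: {len(attention)}")
print(f"shape per layer: {tuple(attention[0].shape)}")
print(tokens)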

SELF-ATTENTION MODELS

head_view(attention, tokens)   # interactive view: one layer at a time, all of its heads
model_view(attention, tokens)  # overview: all layers and heads in a grid

ENCODER-DECODER MODELS

MODEL = "Helsinki-NLP/opus-mt-en-de"
TEXT_ENCODER = "She sees the small elephant."
TEXT_DECODER = "Sie sieht den kleinen Elefanten."
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL, output_attentions=True)
encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors="pt", add_special_tokens=True).input_ids
decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors="pt", add_special_tokens=True).input_ids

outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)

encoder_text = tokenizer.convert_ids_to_tokens(encoder_input_ids[0])
decoder_text = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])
head_view(
    encoder_attention=outputs.encoder_attentions,
    decoder_attention=outputs.decoder_attentions,
    cross_attention=outputs.cross_attentions,
    encoder_tokens=encoder_text,
    decoder_tokens=decoder_text
)
model_view(
    encoder_attention=outputs.encoder_attentions,
    decoder_attention=outputs.decoder_attentions,
    cross_attention=outputs.cross_attentions,
    encoder_tokens=encoder_text,
    decoder_tokens=decoder_text
)
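The raw attention tensors can also be read off directly, e.g. to get a rough source-target alignment from the cross-attention. A minimal sketch (averaging over layers and heads is just one aggregation heuristic, not the only option):

import torch

# cross_attentions: one tensor per decoder layer,
# each of shape [batch, num_heads, tgt_len, src_len]
cross = torch.stack(outputs.cross_attentions)  # [layers, batch, heads, tgt, src]
avg = cross.mean(dim=(0, 2))[0]                # [tgt_len, src_len]
# for each target token, print the most-attended source token
for tgt_idx, src_idx in enumerate(avg.argmax(dim=-1)):
    print(f"{decoder_text[tgt_idx]:>15} -> {encoder_text[src_idx]}")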

Exercise (10 minutes)

Using the en-fr model, translate any sentence from English into French and inspect the attention weights for the translation.

MODEL = "Helsinki-NLP/opus-mt-en-fr"
TEXT_ENCODER = "Although I still have fresh memories of my brother the elder Hamlet's death, and though it was proper to mourn him throughout our kingdom, life still goes on—I think it's wise to mourn him while also thinking about my own well-being."
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer  # AutoModelWithLMHead is deprecated

model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

inputs = tokenizer.encode(TEXT_ENCODER, return_tensors="pt")
outputs = model.generate(inputs, max_length=100, num_beams=4, early_stopping=True)  # max_length=40 would truncate this long sentence
TEXT_DECODER = tokenizer.decode(outputs[0], skip_special_tokens=True)
TEXT_DECODER
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL, output_attentions=True)
encoder_input_ids = tokenizer(TEXT_ENCODER, return_tensors="pt", add_special_tokens=True).input_ids
decoder_input_ids = tokenizer(TEXT_DECODER, return_tensors="pt", add_special_tokens=True).input_ids

outputs = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids)

encoder_text = tokenizer.convert_ids_to_tokens(encoder_input_ids[0])
decoder_text = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])
head_view(
    encoder_attention=outputs.encoder_attentions,
    decoder_attention=outputs.decoder_attentions,
    cross_attention=outputs.cross_attentions,
    encoder_tokens=encoder_text,
    decoder_tokens=decoder_text
)

EXAMPLE: GPT-3
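GPT-3 is only available through the OpenAI API, so its attention weights cannot be visualized the way the open models above can. A hypothetical sketch of querying it (the openai package, the "davinci" engine name, and the OPENAI_API_KEY variable are assumptions, not part of the original notebook):

import os
import openai  # hypothetical: requires an OpenAI account and API key

openai.api_key = os.environ["OPENAI_API_KEY"]

response = openai.Completion.create(
    engine="davinci",  # base GPT-3 model
    prompt="Translate English to French: She sees the small elephant. ->",
    max_tokens=40,
    temperature=0.0,
)
print(response["choices"][0]["text"])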

HOMEWORK - PolEval