Model:
cardiffnlp/twitter-roberta-base-jun2022
This is a RoBERTa-base model trained on 132.26M tweets until the end of June 2022. More details and performance scores are available in the TimeLMs paper.
Below, we provide some usage examples using the standard Transformers interface. For another interface more suited to comparing predictions and perplexity scores between models trained at different temporal intervals, check the TimeLMs repository.
For other models trained until different periods, check this table.
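To give a rough sense of the perplexity comparison mentioned above, the following is a minimal pseudo-perplexity sketch using only the standard Transformers interface; the scoring loop and the example tweet are illustrative additions, not the TimeLMs implementation (see the TimeLMs repository for the full evaluation code).

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

MODEL = "cardiffnlp/twitter-roberta-base-jun2022"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForMaskedLM.from_pretrained(MODEL)
model.eval()

def pseudo_perplexity(text):
    # Mask one position at a time and accumulate the log-probability
    # assigned to the true token (pseudo-log-likelihood scoring).
    input_ids = tokenizer(text, return_tensors='pt')['input_ids'][0]
    log_probs = []
    for i in range(1, input_ids.size(0) - 1):  # skip <s> and </s>
        masked = input_ids.clone()
        masked[i] = tokenizer.mask_token_id
        with torch.no_grad():
            logits = model(masked.unsqueeze(0)).logits[0, i]
        log_probs.append(torch.log_softmax(logits, dim=-1)[input_ids[i]].item())
    return float(torch.exp(-torch.tensor(log_probs).mean()))

print(pseudo_perplexity("So glad I'm fully vaccinated."))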
Replace usernames and links with the placeholders "@user" and "http". If you're interested in retaining verified users, which were also retained during training, you may keep the users listed here.
def preprocess(text):
    # Replace user mentions with "@user" and links with "http"
    preprocessed_text = []
    for t in text.split():
        if len(t) > 1:
            t = '@user' if t[0] == '@' and t.count('@') == 1 else t
            t = 'http' if t.startswith('http') else t
        preprocessed_text.append(t)
    return ' '.join(preprocessed_text)
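For example, applied to a made-up tweet containing a mention and a link (the input below is illustrative), the function produces:

print(preprocess("So excited!! @SomeUser check this out: https://example.com"))
# So excited!! @user check this out: http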
from transformers import pipeline, AutoTokenizer

MODEL = "cardiffnlp/twitter-roberta-base-jun2022"
fill_mask = pipeline("fill-mask", model=MODEL, tokenizer=MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

def pprint(candidates, n):
    for i in range(n):
        token = tokenizer.decode(candidates[i]['token'])
        score = candidates[i]['score']
        print("%d) %.5f %s" % (i+1, score, token))

texts = [
    "So glad I'm <mask> vaccinated.",
    "I keep forgetting to bring a <mask>.",
    "Looking forward to watching <mask> Game tonight!",
]

for text in texts:
    t = preprocess(text)
    print(f"{'-'*30}\n{t}")
    candidates = fill_mask(t)
    pprint(candidates, 5)
Output:
------------------------------
So glad I'm <mask> vaccinated.
1) 0.36928 not
2) 0.29651 fully
3) 0.15332 getting
4) 0.04144 still
5) 0.01805 all
------------------------------
I keep forgetting to bring a <mask>.
1) 0.06048 book
2) 0.03458 backpack
3) 0.03362 lighter
4) 0.03162 charger
5) 0.02832 pen
------------------------------
Looking forward to watching <mask> Game tonight!
1) 0.65149 the
2) 0.14239 The
3) 0.02432 this
4) 0.00877 End
5) 0.00866 Big
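If more than five candidates are needed, the fill-mask pipeline also accepts a top_k argument (standard Transformers behavior, not specific to this model):

candidates = fill_mask(preprocess("I keep forgetting to bring a <mask>."), top_k=10)
pprint(candidates, 10)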
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np
from scipy.spatial.distance import cosine
from collections import Counter

def get_embedding(text):  # naive approach for demonstration
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    features = model(**encoded_input)
    features = features[0].detach().cpu().numpy()
    return np.mean(features[0], axis=0)

MODEL = "cardiffnlp/twitter-roberta-base-jun2022"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL)

query = "The book was awesome"
tweets = ["I just ordered fried chicken ?",
          "The movie was great",
          "What time is the next game?",
          "Just finished reading 'Embeddings in NLP'"]

sims = Counter()
for tweet in tweets:
    sim = 1 - cosine(get_embedding(query), get_embedding(tweet))
    sims[tweet] = sim

print('Most similar to: ', query)
print(f"{'-'*30}")
for idx, (tweet, sim) in enumerate(sims.most_common()):
    print("%d) %.5f %s" % (idx+1, sim, tweet))
Output:
Most similar to: The book was awesome
------------------------------
1) 0.98882 The movie was great
2) 0.96087 Just finished reading 'Embeddings in NLP'
3) 0.95450 I just ordered fried chicken ?
4) 0.95300 What time is the next game?
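Mean pooling over all token embeddings, as in get_embedding above, is a deliberately naive choice. A common alternative, sketched below as an illustrative variant (not part of the original example), is to use the representation of the first token (<s>):

def get_cls_embedding(text):
    # Use the <s> (CLS-style) token representation instead of mean pooling
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    features = model(**encoded_input)[0].detach().cpu().numpy()
    return features[0][0]  # first token of the single input sequence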
from transformers import AutoTokenizer, AutoModel, TFAutoModel
import numpy as np

MODEL = "cardiffnlp/twitter-roberta-base-jun2022"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
text = "Good night ?"
text = preprocess(text)

# Pytorch
model = AutoModel.from_pretrained(MODEL)
encoded_input = tokenizer(text, return_tensors='pt')
features = model(**encoded_input)
features = features[0].detach().cpu().numpy()
features_mean = np.mean(features[0], axis=0)
#features_max = np.max(features[0], axis=0)

# # Tensorflow
# model = TFAutoModel.from_pretrained(MODEL)
# encoded_input = tokenizer(text, return_tensors='tf')
# features = model(encoded_input)
# features = features[0].numpy()
# features_mean = np.mean(features[0], axis=0)
# #features_max = np.max(features[0], axis=0)
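As a quick sanity check (an illustrative addition, not part of the original example), the pooled vector should have the model's hidden size, which is 768 for a RoBERTa-base model:

print(features.shape)       # (1, sequence_length, 768)
print(features_mean.shape)  # (768,)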