Lab 1
#Lab1
import gensim.downloader as gen

def load_model():
    m = gen.load("glove-wiki-gigaword-100")
    return m

def similar_words(model, word1):
    # Print the five nearest neighbours of word1 with their cosine similarity scores
    try:
        for word, per in model.most_similar(word1, topn=5):
            print(f"{word}: {per:.4f}")
    except KeyError as e:
        print(f"Word not found in vocabulary: {e}")

def main():
    model = load_model()
    while True:
        word1 = input("Enter a word (or 'exit' to quit): ").strip().lower()
        if word1 == 'exit':
            break
        similar_words(model, word1)

main()
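The same loaded vectors also support analogy queries through the positive/negative arguments of most_similar. A minimal sketch, assuming model = load_model() is in scope; the word triple is illustrative and not part of the lab:

model = load_model()
# Vector arithmetic: king - man + woman is expected to land near "queen"
result = model.most_similar(positive=["king", "woman"], negative=["man"], topn=1)
print(result)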
Lab 2
#Lab2
import numpy as np
import gensim.downloader as api
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

def load_model():
    print("Loading pre-trained word vectors...")
    model = api.load("glove-wiki-gigaword-100")
    return model

def visualize_words(model, words):
    # Keep only in-vocabulary words and stack their vectors
    labels = [w for w in words if w in model]
    vectors = np.array([model[w] for w in labels])
    # Project the 100-dimensional embeddings down to 2D for plotting
    reducer = PCA(n_components=2)
    reduced_vectors = reducer.fit_transform(vectors)
    plt.figure(figsize=(10, 6))
    plt.scatter(reduced_vectors[:, 0], reduced_vectors[:, 1])
    for label, (x, y) in zip(labels, reduced_vectors):
        plt.annotate(label, (x, y))
    plt.show()

def similar_words(model, word):
    try:
        for similar_word, similarity in model.most_similar(word, topn=5):
            print(f"{similar_word}: {similarity:.4f}")
    except KeyError:
        print(f"'{word}' is not in the vocabulary.")

def main():
    model = load_model()
    domain_words = ["computer", "technology", "internet", "software", "hardware", "AI", "machine", "data", "network"]
    visualize_words(model, domain_words)
    while True:
        word = input("Enter a word to find similar words (or 'exit' to quit): ").strip().lower()
        if word == 'exit':
            break
        similar_words(model, word)

main()
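PCA is one choice of reducer; scikit-learn's t-SNE is a common drop-in alternative for this kind of plot. A sketch under that assumption; note perplexity must be smaller than the number of plotted words:

from sklearn.manifold import TSNE

# t-SNE variant of the reducer; perplexity must be < number of points
reducer = TSNE(n_components=2, perplexity=5, random_state=42)
reduced_vectors = reducer.fit_transform(vectors)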
Lab 3
#Lab3
import gensim
from gensim.models import Word2Vec
import nltk

nltk.download('punkt')

legal_corpus = [
    "The plaintiff filed a lawsuit against the defendant for breach of contract.",
    "A new legal precedent was set in the case of intellectual property rights.",
    # ... (remaining corpus sentences not shown in the original)
]

# Tokenize each sentence into a lowercase word list
tokenized_corpus = [nltk.word_tokenize(s.lower()) for s in legal_corpus]

# Train a Word2Vec model on the legal corpus, then round-trip it to disk
model = Word2Vec(sentences=tokenized_corpus, vector_size=100, window=5, min_count=1, workers=4)
model.save("legal_word2vec.model")
model = Word2Vec.load("legal_word2vec.model")

word = "court"
if word in model.wv:
    for similar_word, similarity in model.wv.most_similar(word, topn=5):
        print(f"{similar_word}: {similarity:.4f}")
else:
    print(f"'{word}' is not in the vocabulary.")
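Beyond neighbour lists, the trained model's .wv KeyedVectors exposes pairwise similarity and raw vectors directly. A small sketch; the word pair is chosen from the corpus above for illustration:

# Cosine similarity between two in-vocabulary words (illustrative pair)
print(model.wv.similarity("lawsuit", "contract"))
# Raw 100-dimensional vector for a word
vec = model.wv["lawsuit"]
print(vec.shape)  # (100,)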
Lab 4
import numpy as np
import gensim.downloader as api

embedding_model = api.load("glove-wiki-gigaword-100")

prompt = input("Enter a prompt to enrich: ").strip()
words = prompt.split()
enriched_prompt = []
for word in words:
    word_lower = word.lower()
    if word_lower in embedding_model:
        # Append the word together with its nearest neighbour in embedding space
        neighbour = embedding_model.most_similar(word_lower, topn=1)[0][0]
        enriched_prompt.append(f"{word} ({neighbour})")
    else:
        enriched_prompt.append(word)

print("\n" + "="*80)
print(" ".join(enriched_prompt))
Lab 5
import random
import gensim.downloader as api

word_vectors = api.load("word2vec-google-news-300")

def get_similar_words(seed_word, topn=5):
    try:
        return [w for w, _ in word_vectors.most_similar(seed_word, topn=topn)]
    except KeyError:
        return []

def create_paragraph(seed_word, similar_words):
    if len(similar_words) < 5:
        return f"Not enough similar words were found for '{seed_word}'."
    sentences = [
        f"The {seed_word} was known for its connection to {similar_words[0]} and {similar_words[1]}.",
        f"One day, the {seed_word} encountered a {similar_words[2]} and they became great friends.",
        f"Together, they explored the world of {similar_words[3]} and discovered the wonders of {similar_words[4]}.",
        f"In the end, the {seed_word} realized that life is full of surprises and adventures."
    ]
    return " ".join(sentences)

def main():
    seed_word = input("Enter a seed word: ").strip().lower()
    similar = get_similar_words(seed_word)
    if not similar:
        print(f"'{seed_word}' is not in the vocabulary.")
        return
    paragraph = create_paragraph(seed_word, similar)
    print(paragraph)

if __name__ == "__main__":
    main()
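The import random in the fragment is unused as reconstructed above. One plausible use, shown only as a sketch, is shuffling the neighbour words inside create_paragraph so repeated runs produce different paragraphs:

# Optional variation: shuffle the neighbours before filling the sentence templates
random.shuffle(similar_words)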
Lab 6
task="sen ment-analysis",
return result
def main():
sentences = [
"The movie was okay, not great but not bad either.",
print(f"Sentence: {sentence}")
print("-" * 60)
main()
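Calling pipeline() without a model argument falls back to a default checkpoint and prints a warning; pinning the model explicitly avoids that. A sketch using the checkpoint Transformers currently defaults to for this task:

classifier = pipeline(
    task="sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)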
Lab 7
from transformers import pipeline

def summarize_text(text):
    model_name = "facebook/bart-large-cnn"
    summarizer = pipeline("summarization", model=model_name)
    summary = summarizer(text, max_length=60, min_length=20, do_sample=False)
    return summary[0]['summary_text']

if __name__ == "__main__":
    passage = """
    The Hugging Face Transformers library provides an easy-to-use interface for working
    with pre-trained models for various NLP tasks, including summarization.
    Summarization is the task of reducing a long text into a shorter version while
    preserving its key information. This can be particularly useful for quickly
    understanding the main points of lengthy documents.
    The library supports several pre-trained models that can be used out-of-the-box for
    summarization tasks.
    """
    summarized_text = summarize_text(passage)
    print(summarized_text)
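facebook/bart-large-cnn caps its input at 1024 tokens, so longer documents are usually handled with tokenizer truncation (or by summarizing in chunks). A hedged sketch of the truncation route; long_text is a placeholder, not part of the lab:

# Let the tokenizer cut inputs that exceed the model's 1024-token limit
summary = summarizer(long_text, max_length=130, min_length=30, truncation=True)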
Lab 9
import wikipediaapi
from pydantic import BaseModel

class InstitutionDetails(BaseModel):
    name: str
    founder: str
    founded_year: str
    branches: str
    employees: str
    summary: str

def fetch_institution_details(institution_name):
    wiki_wiki = wikipediaapi.Wikipedia(
        user_agent="MyWikipediaScraper/1.0 (contact: [email protected])",
        language="en"
    )
    page = wiki_wiki.page(institution_name)
    if not page.exists():
        raise ValueError(f"No Wikipedia page found for '{institution_name}'.")

    founder = founded_year = branches = employees = "Not found"
    summary = page.summary

    # Scan section titles for the fields of interest
    for section in page.sections:
        title = section.title.lower()
        if "founder" in title:
            founder = section.text.strip()
        if "branches" in title:
            branches = section.text.strip()
        if "employees" in title:
            employees = section.text.strip()

    # Look for a founding year anywhere in the page text
    for line in page.text.split("\n"):
        if "founded" in line.lower():
            founded_year = line.strip()
            break

    return InstitutionDetails(
        name=institution_name,
        founder=founder,
        founded_year=founded_year,
        branches=branches,
        employees=employees,
        summary=summary
    )

if __name__ == "__main__":
    institution_name = input("Enter the institution name: ").strip()
    try:
        details = fetch_institution_details(institution_name)
        print(details.model_dump_json(indent=4))
    except ValueError as e:
        print(str(e))
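Because the details object is a pydantic model, its fields are validated when it is constructed. A small sketch of what that buys; the input value here is hypothetical:

from pydantic import ValidationError

try:
    # Missing required fields raise a ValidationError listing each one
    InstitutionDetails(name="Example University")
except ValidationError as e:
    print(e)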