1
imdb_parsen/imdb_top_250_movies.csv
Normal file
@@ -0,0 +1 @@
23
imdb_parsen/main.py
Normal file
@@ -0,0 +1,23 @@
# print(the_matrix.get('tech'))
from imdb import Cinemagoer

# create an instance of the Cinemagoer class
ia = Cinemagoer()

# get a movie
movie = ia.get_movie('0133093')

# print the names of the directors of the movie
print('Directors:')
for director in movie['directors']:
    print(director['name'])

# print the genres of the movie
print('Genres:')
for genre in movie['genres']:
    print(genre)

# search for a person name
people = ia.search_person('Mel Gibson')
for person in people:
    print(person.personID, person['name'])
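A small companion sketch, reusing the ia instance above: when the IMDb ID is not known in advance, Cinemagoer's search_movie() can resolve a title to an ID first. The title 'The Matrix' and the printed fields are illustrative assumptions, not part of the commit.

# illustrative only: look up an ID by title before fetching full details
results = ia.search_movie('The Matrix')   # title string is an assumption
if results:
    print(results[0].movieID, results[0]['title'])
    matrix = ia.get_movie(results[0].movieID)
    print(matrix.get('year'))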
48
imdb_parsen/test.py
Normal file
@@ -0,0 +1,48 @@
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd


# Downloading IMDb top 250 movies' data
url = 'http://www.imdb.com/chart/top'
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
movies = soup.select('td.titleColumn')
crew = [a.attrs.get('title') for a in soup.select('td.titleColumn a')]
ratings = [b.attrs.get('data-value')
           for b in soup.select('td.posterColumn span[name=ir]')]


# create an empty list for storing
# movie information
movie_list = []

# Iterating over movies to extract
# each movie's details
for index in range(0, len(movies)):

    # Separating movie into: 'place',
    # 'title', 'year'
    movie_string = movies[index].get_text()
    movie = (' '.join(movie_string.split()).replace('.', ''))
    movie_title = movie[len(str(index))+1:-7]
    # year = re.search('\((.*?)\)', movie_string).group(1)
    place = movie[:len(str(index))-(len(movie))]
    data = {"place": place,
            "movie_title": movie_title,
            "rating": ratings[index],
            # "year": year,
            "star_cast": crew[index],
            }
    movie_list.append(data)

# printing movie details with its rating.
for movie in movie_list:
    print(movie['place'], '-', movie['movie_title'],
          'Starring:', movie['star_cast'], movie['rating'])


## .......##
df = pd.DataFrame(movie_list)
df.to_csv('imdb_top_250_movies.csv', index=False)
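Worth hedging: IMDb has since moved the Top 250 chart to JavaScript-rendered markup and tends to reject requests without a browser-like User-Agent, so the selectors above may come back empty. A minimal defensive sketch (the header value and error message are assumptions, not part of the commit):

headers = {'User-Agent': 'Mozilla/5.0'}   # assumed browser-like UA string
response = requests.get(url, headers=headers)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
movies = soup.select('td.titleColumn')
if not movies:
    raise SystemExit("No rows matched 'td.titleColumn' - the chart markup has likely changed.")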
Binary file not shown.
@@ -19,6 +19,7 @@ def add_movie():
        data=moviedb_func.all_select(),
        data_medium=moviedb_func.all_select(what_select="medium"),
        data_regie=moviedb_func.all_select(what_select="regie"),
        data_all=moviedb_func.show_movie_list()
    )
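For context, these keyword arguments sit inside a Flask render_template call; below is a minimal sketch of how the surrounding route might look. The route path, template name, and app object are assumptions, since the hunk shows only the argument list.

from flask import Flask, render_template
import moviedb_func

app = Flask(__name__)


@app.route("/add_movie")            # route path is an assumption
def add_movie():
    return render_template(
        "add_movie.html",           # template name is an assumption
        data=moviedb_func.all_select(),
        data_medium=moviedb_func.all_select(what_select="medium"),
        data_regie=moviedb_func.all_select(what_select="regie"),
        data_all=moviedb_func.show_movie_list(),
    )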
Binary file not shown.
@@ -158,9 +158,9 @@ def show_movie_list(db_name="movie_db.db"):
     db = sqlite3.connect(db_name)
     SELCET_VALUE = pd.read_sql(SQL_PARAM, db)
     return_list_dict = []
-    for id, titel, genre, regie_surname, regie_lastname in zip(SELCET_VALUE["id"], SELCET_VALUE["titel"], SELCET_VALUE["genre"], SELCET_VALUE["regie_surname"], SELCET_VALUE["regie_lastname"]):
+    for id, titel, genre, regie_surname, regie_lastname, medium in zip(SELCET_VALUE["id"], SELCET_VALUE["titel"], SELCET_VALUE["genre"], SELCET_VALUE["regie_surname"], SELCET_VALUE["regie_lastname"], SELCET_VALUE["medium"]):
         return_list_dict.append(
-            {"id": id, "titel": titel, "genre": genre, "regie": regie_surname + " " + regie_lastname})
+            {"id": id, "titel": titel, "genre": genre, "regie": regie_surname + " " + regie_lastname, "medium": medium})
     return return_list_dict
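As a side note, the same list of dicts could be built with pandas' DataFrame.to_dict; a sketch, not part of the commit, assuming SELCET_VALUE really carries the columns referenced below:

    rows = SELCET_VALUE.to_dict("records")        # one dict per result row
    return [
        {"id": r["id"], "titel": r["titel"], "genre": r["genre"],
         "regie": r["regie_surname"] + " " + r["regie_lastname"],
         "medium": r["medium"]}
        for r in rows
    ]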
@@ -171,4 +171,5 @@ if __name__ == "__main__":
    # search_name="DVD", select_from="medium", select_where="medium")
    add_movie_to_list(movie_name="Schlumpfland",
                      genre_id=1, regie_id=1, medium_id=1)
    print(show_movie_list())
    for test in show_movie_list():
        print(test.get("id"), test.get("medium"))
@@ -25,7 +25,26 @@
    </select>

    <button type="submit"> Eingabe </button>


    </form>
    <br>

    <table border="1" width="50%">
        <tr>
            <th>Id</th>
            <th>Name</th>
            <th>Genre</th>
            <th>Regie</th>
            <th>Medium</th>
        </tr>
        {% for list_all in data_all %}
        <tr>
            <td>{{list_all.get("id")}}</td>
            <td>{{list_all.get("titel")}}</td>
            <td>{{list_all.get("genre")}}</td>
            <td>{{list_all.get("regie")}}</td>
            <td>{{list_all.get("medium")}}</td>
        </tr>
        {% endfor %}
    </table>

{% endblock %}
37
ollama_remote_query/ollama_prompt.py
Executable file
@@ -0,0 +1,37 @@
#!/usr/bin/env python
from ollama import Client
import datetime
import os

output_folder = "output"

if not os.path.exists(output_folder):
    os.makedirs(output_folder)

# Create a timestamp used in the output file name
now = int(datetime.datetime.now().timestamp())
# res = int(now.timestamp())

# Prompt
prompt = input("Was möchtest du Fragen: ")  # "What would you like to ask: "

client = Client(
    host='http://localhost:11434',
    headers={'x-some-header': 'some-value'}
)

response = client.chat(
    model='gemma3n:e2b',
    messages=[
        {
            'role': 'user',
            'content': prompt,
        }],
    stream=True)

# Print each streamed chunk and append it to a Markdown log file
for chunk in response:
    print(chunk['message']['content'], end='', flush=True)
    with open(output_folder + "/ollama_output_" + str(now) + ".md", "a") as dm:
        print(chunk['message']['content'], end='', flush=True, file=dm)

# with open("test.md", "w") as dm:
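If streaming is not needed, the same request can be made without stream=True and the reply printed in one piece; a sketch reusing the client and prompt defined above (the dict-style access mirrors the streaming code and may vary with the installed ollama library version):

# non-streaming variant: the whole answer arrives in a single response object
reply = client.chat(
    model='gemma3n:e2b',
    messages=[{'role': 'user', 'content': prompt}],
)
print(reply['message']['content'])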
6
ollama_remote_query/output/ollama_output_1755524506.md
Normal file
@@ -0,0 +1,6 @@
Okay, I'm exiting. Have a good one!

If you need anything in the future, feel free to ask. 😊
38
ollama_remote_query/translate_func.py
Normal file
@@ -0,0 +1,38 @@
# %% packages
# from langchain_openai import ChatOpenAI
import langchain_ollama
from langchain_core.prompts import ChatPromptTemplate
# from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
# load_dotenv('.env')


def translate_func(select_lang="German", target_lang="English", query_trans="prompt"):
    if query_trans == "prompt":
        query_trans = input("Was soll Übersetzt werden ? ")  # "What should be translated?"
    # %% set up prompt template
    prompt_template = ChatPromptTemplate.from_messages([
        ("system", f"You are an AI assistant that translates {select_lang} into another language."),
        ("user", "Translate this sentence: '{input}' into {target_language}"),
    ])

    # %% model
    model = langchain_ollama.llms.OllamaLLM(base_url='http://localhost:11434',
                                            model="gemma3n:e2b",
                                            temperature=0)

    # %% chain
    chain = prompt_template | model | StrOutputParser()

    # %% invoke chain
    res = chain.invoke({"input": query_trans,
                        "target_language": target_lang})
    print(res)

# %%


if __name__ == "__main__":
    select_lang = "German"
    target_lang = "English"
    translate_func(select_lang=target_lang, target_lang=select_lang)
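A hypothetical non-interactive call for illustration (the sample sentence is an assumption, and the Ollama server at localhost:11434 must be running):

# bypasses the input() prompt by passing the text to translate directly
translate_func(select_lang="German", target_lang="English",
               query_trans="Guten Morgen, wie geht es dir?")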