"""Small Cinemagoer (IMDbPY) demo: fetch a movie, print its directors and
genres, then search for a person by name."""
from imdb import Cinemagoer

# One shared client instance for every lookup below.
client = Cinemagoer()

# The Matrix, fetched by its IMDb title id.
matrix = client.get_movie('0133093')

# Directors come back as Person objects; print just the names.
print('Directors:')
for director in matrix['directors']:
    print(director['name'])

# Genres are plain strings.
print('Genres:')
for genre in matrix['genres']:
    print(genre)

# Name search returns a list of Person matches with their IMDb ids.
for person in client.search_person('Mel Gibson'):
    print(person.personID, person['name'])
# IMDb Top-250 scraper: download the chart page, parse rank/title/rating/cast
# for every entry, print them, and write imdb_top_250_movies.csv.
#
# NOTE(review): IMDb now renders this chart client-side, so these CSS
# selectors may match nothing on the live site — verify before relying on it.
import re  # kept for the commented-out year extraction below
import pandas as pd


CHART_URL = 'http://www.imdb.com/chart/top'
CSV_PATH = 'imdb_top_250_movies.csv'


def parse_movie_cell(movie_string, index):
    """Split one chart-cell text into (place, movie_title).

    ``movie_string`` looks like ``"1.\\n The Shawshank Redemption\\n (1994)"``;
    ``index`` is the 0-based position in the chart, so the displayed rank
    is ``index + 1``.

    Returns a ``(place, movie_title)`` tuple of strings.
    """
    # Collapse whitespace and drop the "." after the rank digits.
    movie = ' '.join(movie_string.split()).replace('.', '')
    # Width of the displayed rank. The original used len(str(index)), which
    # is one character short exactly for ranks 10 and 100 (indices 9 and 99).
    width = len(str(index + 1))
    movie_title = movie[width + 1:-7]   # strip "<rank> " prefix and " (yyyy)" suffix
    place = movie[:width - len(movie)]  # negative-stop slice keeps only the rank digits
    # year = re.search(r'\((.*?)\)', movie_string).group(1)
    return place, movie_title


def scrape_top_250():
    """Fetch the chart and return a list of per-movie dicts."""
    # Imported locally so parse_movie_cell() stays importable even when the
    # scraping dependencies are not installed.
    import requests
    from bs4 import BeautifulSoup

    response = requests.get(CHART_URL)
    soup = BeautifulSoup(response.text, "html.parser")
    movies = soup.select('td.titleColumn')
    crew = [a.attrs.get('title') for a in soup.select('td.titleColumn a')]
    ratings = [b.attrs.get('data-value')
               for b in soup.select('td.posterColumn span[name=ir]')]

    # Was named `list`, which shadowed the builtin.
    records = []
    for index, cell in enumerate(movies):
        place, movie_title = parse_movie_cell(cell.get_text(), index)
        records.append({
            "place": place,
            "movie_title": movie_title,
            "rating": ratings[index],
            # "year": year,
            "star_cast": crew[index],
        })
    return records


def main():
    """Scrape, print each movie with its rating, then write the CSV."""
    records = scrape_top_250()
    for movie in records:
        print(movie['place'], '-', movie['movie_title'],
              'Starring:', movie['star_cast'], movie['rating'])
    pd.DataFrame(records).to_csv(CSV_PATH, index=False)


if __name__ == "__main__":
    main()
<table>
    <tr>
        <th>Id</th>
        <th>Name</th>
        <th>Genre</th>
        <th>Regie</th>
        <th>Medium</th>
    </tr>
    {% for list_all in data_all %}
    <tr>
        <td>{{list_all.get("id")}}</td>
        <td>{{list_all.get("titel")}}</td>
        <td>{{list_all.get("genre")}}</td>
        <td>{{list_all.get("regie")}}</td>
        <td>{{list_all.get("medium")}}</td>
    </tr>
    {% endfor %}
</table>
#! /usr/bin/env python
"""Send one interactive prompt to a local Ollama server and stream the
answer to stdout while appending it to a timestamped markdown file."""
from ollama import Client
import datetime
import os

output_folder = "output"

# exist_ok avoids the race between the exists() check and the mkdir.
os.makedirs(output_folder, exist_ok=True)

# Unix timestamp used to name this run's output file.
now = int(datetime.datetime.now().timestamp())

# PROMPT (read interactively from the user)
prompt = input("Was möchtest du Fragen: ")

client = Client(
    host='http://localhost:11434',
    headers={'x-some-header': 'some-value'}
)

# stream=True yields the answer chunk by chunk instead of one blob.
response = client.chat(
    model='gemma3n:e2b',
    messages=[
        {
            'role': 'user',
            'content': prompt,
        }],
    stream=True)

# Open the output file once for the whole stream — the original re-opened
# it in append mode for every single chunk.
output_path = output_folder + "/ollama_output_" + str(now) + ".md"
with open(output_path, "a") as dm:
    for chunk in response:
        text = chunk['message']["content"]
        print(text, end='', flush=True)
        print(text, end='', flush=True, file=dm)
# %% packages
# from langchain_openai import ChatOpenAI
import langchain_ollama
from langchain_core.prompts import ChatPromptTemplate
# from dotenv import load_dotenv
from langchain_core.output_parsers import StrOutputParser
# load_dotenv('.env')


def translate_func(select_lang="Germany", target_lang="English", query_trans="prompt"):
    """Translate ``query_trans`` from ``select_lang`` into ``target_lang``
    with a local Ollama model, printing the result.

    The default ``query_trans="prompt"`` is a sentinel: the text to translate
    is then read interactively from the user.
    NOTE(review): "Germany" is a country name, the language is "German" —
    confirm whether the default should change.
    """
    if query_trans == "prompt":
        query_trans = input("Was soll Übersetzt werden ? ")

    # %% set up prompt template (source language is baked into the system
    # message; sentence and target language are filled in at invoke time)
    prompt_template = ChatPromptTemplate.from_messages([
        ("system",
         f"You are an AI assistant that translates {select_lang} "
         "into another language."),
        ("user", "Translate this sentence: '{input}' into {target_language}"),
    ])

    # %% model: local Ollama server, temperature=0 for deterministic output
    model = langchain_ollama.llms.OllamaLLM(
        base_url='http://localhost:11434',
        model="gemma3n:e2b",
        temperature=0)

    # %% chain: template -> model -> plain string
    chain = prompt_template | model | StrOutputParser()

    # %% invoke chain
    res = chain.invoke({"input": query_trans,
                        "target_language": target_lang})
    print(res)


if __name__ == "__main__":
    select_lang = "Germany"
    target_lang = "English"
    # BUG FIX: the original passed select_lang=target_lang and
    # target_lang=select_lang, silently swapping the translation direction.
    translate_func(select_lang=select_lang, target_lang=target_lang)