import copy
import json
from collections import defaultdict
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
from typing import Any, Generator, List, TextIO, Tuple, Union

import pandas as pd
from flask import current_app
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import PageBreak, Paragraph, SimpleDocTemplate, Table
from werkzeug.datastructures import FileStorage

from ..base.mode import ModeGroups
from ..examination_schedule.models import TermOfDefence
from ..students.models import Group, ProjectGradeSheet, Student
from .exceptions import InvalidNameOrTypeHeaderException


def check_columns(df: pd.DataFrame) -> bool:
    """Check that the frame has the required student columns with the expected dtypes."""
    headers = set(df.keys().values)
    column_names = ["NAZWISKO", "IMIE", "INDEKS", "EMAIL"]
    column_types = ["object", "object", "int", "object"]
    return all(column_name in headers for column_name in column_names) and all(
        str(df.dtypes[column_name]).startswith(column_type)
        for column_name, column_type in zip(column_names, column_types)
    )
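# Example (illustrative data): a frame like the one below passes the check,
# because the text columns get pandas dtype "object" and INDEKS is integral.
#
#   df = pd.DataFrame(
#       {
#           "NAZWISKO": ["Kowalski"],
#           "IMIE": ["Jan"],
#           "INDEKS": [123456],
#           "EMAIL": ["jan.kowalski@example.com"],
#       }
#   )
#   assert check_columns(df)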


def parse_csv(
    file: Union[FileStorage, TextIO], year_group_id: int
) -> Generator[Student, Any, None]:
    """Read an uploaded CSV file and lazily yield Student models for one year group."""
    df = pd.read_csv(file)

    if not check_columns(df):
        raise InvalidNameOrTypeHeaderException
    students = (
        Student(
            last_name=dict(item.items())["NAZWISKO"],
            first_name=dict(item.items())["IMIE"],
            index=dict(item.items())["INDEKS"],
            email=dict(item.items())["EMAIL"],
            year_group_id=year_group_id,
        )
        for _, item in df.iterrows()
    )
    return students
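# Usage sketch (illustrative; the file path, year group id and session handling
# are assumptions made by the caller, not part of this module):
#
#   with open("students.csv") as csv_file:
#       for student in parse_csv(csv_file, year_group_id=1):
#           db.session.add(student)
#
# The return value is a lazy generator, so Student objects are only created
# while it is iterated.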


def generate_csv(students_and_groups: List[Tuple[Student, Group]]) -> str:
    """Render (student, group) pairs as CSV text using the headers below."""
    headers = [
        "INDEKS",
        "IMIE",
        "NAZWISKO",
        "EMAIL",
        "CDYD_KOD",
        "PRZ_KOD",
        "TZAJ_KOD",
        "GR_NR",
        "PRG_KOD",
    ]
    data = [
        (
            student.index,
            student.first_name,
            student.last_name,
            student.email,
            group.cdyd_kod,
            group.prz_kod,
            group.tzaj_kod,
            group.project_supervisor_id,  # GR_NR column
            None,  # PRG_KOD column is left empty
        )
        for student, group in students_and_groups
    ]
    # Transpose the rows into per-header columns before building the frame.
    dataframe = defaultdict(list)
    for row in data:
        for idx, item in enumerate(row):
            dataframe[headers[idx]].append(item)

    df = pd.DataFrame(dataframe)
    return df.to_csv(index=False)
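# Usage sketch (illustrative; how the pairs are queried is up to the caller):
#
#   pairs: List[Tuple[Student, Group]] = [(some_student, some_group)]
#   csv_text = generate_csv(pairs)
#   print(csv_text.splitlines()[0])
#   # INDEKS,IMIE,NAZWISKO,EMAIL,CDYD_KOD,PRZ_KOD,TZAJ_KOD,GR_NR,PRG_KOD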


def generate_range_dates(
    start_date: datetime, end_date: datetime, step_in_minutes: int
) -> Generator[datetime, Any, None]:
    """Yield slot start times every ``step_in_minutes`` between the two dates.

    A start time is only yielded if the whole slot still ends on or before
    ``end_date``.
    """
    current_date = copy.copy(start_date)
    while True:
        next_date = current_date + timedelta(minutes=step_in_minutes)

        if next_date > end_date:
            break
        yield current_date
        current_date = copy.copy(next_date)
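# Example (illustrative dates): a 09:00-11:00 window with a 30-minute step
# yields 09:00, 09:30, 10:00 and 10:30; 10:30 is the last start whose slot
# still ends within the window.
#
#   slots = list(
#       generate_range_dates(
#           datetime(2023, 1, 20, 9, 0), datetime(2023, 1, 20, 11, 0), 30
#       )
#   )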


def generate_examination_schedule_pdf_file(
    title: str, nested_term_of_defences: List[List[TermOfDefence]], base_dir: Path
) -> bytes:
    """Build a landscape A4 PDF with one schedule table per day of defences."""
    pagesize = (297 * mm, 210 * mm)  # A4 landscape
    # Column headers (Polish): no., time, project name, supervisor, team,
    # committee, remarks.
    headers = [
        "lp.",
        "Godzina",
        "Nazwa projektu",
        "Opiekun",
        "Zespol",
        "Komisja",
        "Uwagi",
    ]
    pdf_buffer = BytesIO()
    my_doc = SimpleDocTemplate(
        pdf_buffer,
        pagesize=pagesize,
        topMargin=1 * inch,
        leftMargin=1 * inch,
        rightMargin=1 * inch,
        bottomMargin=1 * inch,
        title=title,
    )

    pdfmetrics.registerFont(TTFont("Lato", base_dir / "fonts" / "Lato.ttf"))
    style = getSampleStyleSheet()
    bodyText = style["BodyText"]
    bodyText.fontName = "Lato"
    normal = style["Heading1"]
    normal.alignment = TA_CENTER
    flowables = []

    for term_of_defences in nested_term_of_defences:
        if len(term_of_defences) == 0:
            continue
        date = datetime.strftime(term_of_defences[0].start_date.date(), "%d.%m.%Y")
        paragraph_1 = Paragraph(f"{title} ~ {date}", normal)
        flowables.append(paragraph_1)
        data = [headers]

        for idx, td in enumerate(term_of_defences, start=1):
            # Displayed time is the stored start time shifted by two hours.
            new_date = td.start_date + timedelta(hours=2)
            group_name = td.group.name if td.group is not None else ""
            if group_name != "":
                ps = td.group.project_supervisor
                project_supervisor_fullname = f"{ps.first_name[0]}. {ps.last_name}"
                students = td.group.students
                team = ", ".join([f"{s.first_name} {s.last_name}" for s in students])
            else:
                project_supervisor_fullname = ""
                team = ""

            members = td.members_of_committee
            if len(members) == 0:
                committee = ""
            else:
                members_iter = (f"{m.first_name[0]} {m.last_name}" for m in members)
                committee = ", ".join(members_iter)

            data.append(
                [
                    str(idx),
                    new_date.strftime("%H:%M"),
                    Paragraph(group_name, bodyText),
                    Paragraph(project_supervisor_fullname, bodyText),
                    Paragraph(team, bodyText),
                    Paragraph(committee, bodyText),
                    "",  # "Uwagi" (remarks) column is left blank
                ]
            )

        table = Table(
            data=data,
            style=[
                ("GRID", (0, 0), (-1, -1), 0.5, colors.black),
                ("BACKGROUND", (0, 0), (-1, 0), colors.HexColor("#A6F1A6")),
                ("BACKGROUND", (0, 0), (1, -1), colors.HexColor("#A6F1A6")),
            ],
            colWidths=[
                0.25 * inch,
                0.7 * inch,
                1.6 * inch,
                1.5 * inch,
                2.5 * inch,
                2.2 * inch,
                2 * inch,
            ],
        )
        flowables.append(table)
        flowables.append(PageBreak())

    my_doc.build(flowables)
    pdf_value = pdf_buffer.getvalue()
    pdf_buffer.close()
    return pdf_value
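# Usage sketch (illustrative; the title string and the per-day grouping of
# terms are assumptions made by the caller):
#
#   pdf_bytes = generate_examination_schedule_pdf_file(
#       "Examination schedule",
#       terms_grouped_by_day,                 # List[List[TermOfDefence]]
#       current_app.config["BASE_DIR"],
#   )
#   # pdf_bytes can be written to a file or returned in a Flask response.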


def get_duration_time(mode: str) -> Union[int, None]:
    """Return the slot duration in minutes for a study mode, or None if unknown."""
    duration_time = None
    if mode == ModeGroups.NON_STATIONARY.value:
        duration_time = 20
    elif mode in [
        ModeGroups.STATIONARY.value,
        ModeGroups.ENGLISH_SPEAKING_STATIONARY.value,
    ]:
        duration_time = 30
    return duration_time
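# Example (illustrative): stationary and English-speaking stationary groups get
# 30-minute slots, non-stationary groups get 20-minute slots, and any other
# mode yields None.
#
#   step = get_duration_time(ModeGroups.STATIONARY.value)  # -> 30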


def load_weight_for_project_grade_sheet() -> Union[dict, None]:
    """Load grade-sheet weights from config/weights_project_grade_sheet.json."""
    base_dir = current_app.config.get("BASE_DIR")
    config_dir = base_dir / "config"

    with open(config_dir / "weights_project_grade_sheet.json") as f:
        data = json.load(f)

    return data
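# The weights file is read as a flat JSON object; calculate_points_for_one_term
# below looks each key up as a ProjectGradeSheet attribute and treats keys
# ending in "1" as first-term weights. A hypothetical file could look like:
#
#   {"presentation_1": 2, "documentation_1": 3, "presentation_2": 2}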


def calculate_points_for_one_term(
    weights: dict, project_grade_sheets: List[ProjectGradeSheet]
) -> list:
    """Compute a (first term, second term) points pair for every grade sheet.

    Weight keys ending in "1" count towards the first term and the rest towards
    the second; a missing sheet contributes (0, 0).
    """
    terms = []
    for pgs in project_grade_sheets:
        if pgs is None:
            terms.append((0, 0))
            continue

        first_term_points = {
            "nominator": 0,
            "denominator": 0,
        }
        second_term_points = {
            "nominator": 0,
            "denominator": 0,
        }
        for weight_key, weight_value in weights.items():
            points = (
                first_term_points if weight_key.endswith("1") else second_term_points
            )
            try:
                attribute_value = getattr(pgs, weight_key)
            except AttributeError:
                attribute_value = 0
            # Each grade is scaled by 1/4 before being weighted.
            points["nominator"] += attribute_value * weight_value * 1 / 4
            points["denominator"] += weight_value

        try:
            fp = first_term_points["nominator"] / first_term_points["denominator"]
        except ZeroDivisionError:
            fp = 0
        try:
            sp = second_term_points["nominator"] / second_term_points["denominator"]
        except ZeroDivisionError:
            sp = 0

        terms.append((round(fp, 2) * 100, round(sp, 2) * 100))

    return terms
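# Worked example (illustrative weights and grades): with weights
# {"a1": 2, "b1": 3} and a sheet where pgs.a1 == 4 and pgs.b1 == 2, the
# first-term numerator is 4*2/4 + 2*3/4 = 3.5 and the denominator is 2 + 3 = 5,
# so the first entry is round(3.5 / 5, 2) * 100 == 70.0; with no keys ending in
# "2", the second entry falls back to 0.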


def attach_points_for_first_and_second_term_to_group_models(
    items: List[Group],
) -> None:
    """Compute term points for each group and store them on the Group objects."""
    weights = load_weight_for_project_grade_sheet()
    pgs = []
    for g in items:
        if len(g.project_grade_sheet) == 0:
            pgs.append(None)
        else:
            pgs.append(g.project_grade_sheet[0])
    calculated_points = calculate_points_for_one_term(weights, pgs)

    for group, points in zip(items, calculated_points):
        group.points_for_first_term = points[0]
        group.points_for_second_term = points[1]