diff --git a/backend/app/coordinator/routes/groups.py b/backend/app/coordinator/routes/groups.py
index ed864f6..4a26590 100644
--- a/backend/app/coordinator/routes/groups.py
+++ b/backend/app/coordinator/routes/groups.py
@@ -10,10 +10,11 @@ from ..schemas.groups import (
     GroupCreateSchema,
     GroupEditSchema,
     GroupQuerySchema,
+    GroupSetGradeSchema,
     GroupsPaginationSchema,
 )
 from ..schemas.students import DetailGroupSchema
-from ..utils import attach_points_for_first_and_second_term_to_group_models
+from ..utils import attach_grade_to_group_models
 
 bp = APIBlueprint("groups", __name__, url_prefix="/groups")
 
@@ -28,10 +29,8 @@ def list_groups(year_group_id: int, query: dict) -> dict:
         groups_query = Group.search_by_name(year_group_id, search_name)
 
     data = paginate_models(page, groups_query, per_page)
-
     items = data["items"]
-    attach_points_for_first_and_second_term_to_group_models(items)
-
+    attach_grade_to_group_models(items)
     return {"groups": items, "max_pages": data["max_pages"]}
 
 
@@ -93,6 +92,7 @@ def detail_group(group_id: int) -> Group:
     group = Group.query.filter_by(id=group_id).first()
     if group is None:
         abort(404, "Not found group!")
+    attach_grade_to_group_models([group])
     return group
 
 
@@ -145,3 +145,22 @@ def edit_group(group_id: int, data: dict) -> dict:
     db.session.commit()
 
     return {"message": "Group was updated!"}
+
+
+@bp.put("/<int:group_id>/set-grades/")
+@bp.input(GroupSetGradeSchema)
+@bp.output(MessageSchema)
+def set_grade_for_group(group_id: int, data: dict) -> dict:
+    if not data:
+        abort(400, "You have passed empty data!")
+
+    group_query = Group.query.filter_by(id=group_id)
+    group = group_query.first()
+
+    if group is None:
+        abort(404, "Not found group!")
+
+    group_query.update(data)
+    db.session.commit()
+
+    return {"message": "Grade was updated!"}
diff --git a/backend/app/coordinator/schemas/groups.py b/backend/app/coordinator/schemas/groups.py
index 65d3588..3ef1a3d 100644
--- a/backend/app/coordinator/schemas/groups.py
+++ b/backend/app/coordinator/schemas/groups.py
@@ -28,3 +28,8 @@ class GroupEditSchema(Schema):
 
 class GroupIdSchema(Schema):
     group_id = fields.Integer(required=True)
+
+
+class GroupSetGradeSchema(Schema):
+    grade_for_first_term = fields.Float()
+    grade_for_second_term = fields.Float()
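Note (reviewer sketch, not part of the diff): the new endpoint takes the two optional float fields from GroupSetGradeSchema and writes them straight onto the group. A minimal client-side call could look like the snippet below; the host, port and any prefix in front of /groups are assumptions, only the /<group_id>/set-grades/ part comes from the route above.

import requests

# Hypothetical base URL -- adjust to wherever the groups blueprint is mounted.
BASE_URL = "http://localhost:5000/coordinator/groups"

payload = {
    "grade_for_first_term": 4.5,   # fields defined by GroupSetGradeSchema
    "grade_for_second_term": 5.0,
}

# PUT /<group_id>/set-grades/ sets the grade columns directly; grades that are
# non-zero are later left untouched by attach_grade_to_group_models.
response = requests.put(f"{BASE_URL}/12/set-grades/", json=payload)
print(response.status_code, response.json())  # expected: 200 {"message": "Grade was updated!"}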
f"{ps.first_name[0]}. {ps.last_name}" students = td.group.students - # print(students) team = ", ".join([f"{s.first_name} {s.last_name}" for s in students]) else: project_supervisor_fullname = "" team = "" members = td.members_of_committee - # print(members) + if len(members) == 0: committee = "" else: - members_iter = (f"{m.first_name[0]} {m.last_name}" for m in members) + members_iter = (f"{m.first_name[0]}. {m.last_name}" for m in members) committee = ", ".join(members_iter) data.append( @@ -226,58 +238,97 @@ def load_weight_for_project_grade_sheet() -> Union[dict, None]: return data -def calculate_points_for_one_term( - weights: dict, project_grade_sheets: List[ProjectGradeSheet] -) -> list: - terms = [] - for pgs in project_grade_sheets: - if pgs is None: - terms.append((0, 0)) - continue +def get_criterion_by_weight_key(weight_key: str) -> str: + if weight_key.startswith("presentation"): + return "presentation" + if weight_key.startswith("documentation"): + return "documentation" + if weight_key.startswith("group_work"): + return "group_work" + return "product_project" - first_term_points = { - "nominator": 0, - "denominator": 0, + +def grade_in_percentage(term_key: str, term_points: dict) -> str: + try: + criterions = { + "presentation": current_app.config.get(f"PRESENTATION_WEIGHT_{term_key}"), + "group_work": current_app.config.get(f"GROUP_WORK_WEIGHT_{term_key}"), + "documentation": current_app.config.get(f"DOCUMENTATION_WEIGHT_{term_key}"), + "product_project": current_app.config.get( + f"PRODUCT_PROJECT_WEIGHT_{term_key}" + ), } - second_term_points = { - "nominator": 0, - "denominator": 0, - } - for weight_key, weight_value in weights.items(): - points = ( - first_term_points if weight_key.endswith("1") else second_term_points + result = 0 + for criterion_key, criterion_weight in criterions.items(): + result += ( + term_points[criterion_key]["gained_points"] + / term_points[criterion_key]["all_points"] + * criterion_weight ) - try: - attribute_value = getattr(pgs, weight_key) - except AttributeError: - attribute_value = 0 - points["nominator"] += attribute_value * weight_value * 1 / 4 - points["denominator"] += weight_value + result /= sum(criterions.values()) + except ZeroDivisionError: + result = 0 + return result + +def calculate_points_for_both_terms( + weights: dict, project_grade_sheet: ProjectGradeSheet +) -> Tuple[float, float]: + if project_grade_sheet is None: + return 0.0, 0.0 + first_term_points = { + "presentation": {"gained_points": 0, "all_points": 0}, + "documentation": {"gained_points": 0, "all_points": 0}, + "group_work": {"gained_points": 0, "all_points": 0}, + "product_project": {"gained_points": 0, "all_points": 0}, + } + + second_term_points = copy.deepcopy(first_term_points) + + for weight_key, weight_value in weights.items(): + points = first_term_points if weight_key.endswith("1") else second_term_points + criterion = get_criterion_by_weight_key(weight_key) try: - fp = first_term_points["nominator"] / first_term_points["denominator"] - except ZeroDivisionError: - fp = 0 - try: - sp = second_term_points["nominator"] / second_term_points["denominator"] - except ZeroDivisionError: - sp = 0 + attribute_value = getattr(project_grade_sheet, weight_key) + except AttributeError: + attribute_value = 0 + points[criterion]["gained_points"] += attribute_value / 4 * weight_value + points[criterion]["all_points"] += weight_value - terms.append((round(fp, 2) * 100, round(sp, 2) * 100)) - - return terms + points_1 = round(grade_in_percentage("FIRST_TERM", 
diff --git a/backend/app/project_supervisor/query/project_grade_sheet.py b/backend/app/project_supervisor/query/project_grade_sheet.py
index 6d0d6f8..1b6151d 100644
--- a/backend/app/project_supervisor/query/project_grade_sheet.py
+++ b/backend/app/project_supervisor/query/project_grade_sheet.py
@@ -1,11 +1,14 @@
 from flask import abort
 
+from ...coordinator.utils import attach_points_for_first_and_second_term_to_group
 from ...dependencies import db
 from ...students.models import Group, ProjectGradeSheet
 from ..models import ProjectSupervisor
 
 
-def update_project_grade_sheet(group_id: int, query: dict, data: dict) -> None:
+def update_project_grade_sheet(
+    group_id: int, query: dict, data: dict
+) -> ProjectGradeSheet:
     project_supervisor_id = query.get("id")
     project_supervisor = ProjectSupervisor.query.filter(
         ProjectSupervisor.id == project_supervisor_id
@@ -28,4 +31,5 @@ def update_project_grade_sheet(group_id: int, query: dict, data: dict) -> None:
         abort(404, "Not found project grade sheet!")
 
     pgs_query.update(data)
+    attach_points_for_first_and_second_term_to_group(group)
     db.session.commit()
diff --git a/backend/app/students/models.py b/backend/app/students/models.py
index bd8c5c9..a228024 100644
--- a/backend/app/students/models.py
+++ b/backend/app/students/models.py
@@ -38,8 +38,10 @@ class Group(Base):
     project_supervisor = db.relationship("ProjectSupervisor", backref="groups")
     year_group_id = db.Column(db.Integer, db.ForeignKey("year_groups.id"))
     year_group = db.relationship("YearGroup", backref="groups", lazy="joined")
-    points_for_first_term = db.Column(db.Integer, default=0, nullable=False)
-    points_for_second_term = db.Column(db.Integer, default=0, nullable=False)
+    points_for_first_term = db.Column(db.Float, default=0, nullable=False)
+    points_for_second_term = db.Column(db.Float, default=0, nullable=False)
+    grade_for_first_term = db.Column(db.Float, default=0, nullable=False)
+    grade_for_second_term = db.Column(db.Float, default=0, nullable=False)
     students = db.relationship(
         "Student", secondary=students_groups, back_populates="groups"
     )
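Note (reviewer sketch, not part of the diff): the new grade columns default to 0 and are only derived from points while they are still 0, so a grade set manually through the set-grades endpoint is preserved. An in-memory illustration (column defaults are normally applied on insert, so the zeros are passed explicitly here):

from app.coordinator.utils import attach_grade_to_group_models
from app.students.models import Group

auto = Group(
    name="team-a",
    points_for_first_term=84.0,
    points_for_second_term=55.0,
    grade_for_first_term=0,
    grade_for_second_term=0,
)
manual = Group(
    name="team-b",
    points_for_first_term=84.0,
    points_for_second_term=55.0,
    grade_for_first_term=3.0,  # already set via PUT .../set-grades/
    grade_for_second_term=0,
)

attach_grade_to_group_models([auto, manual])
assert auto.grade_for_first_term == 4.5    # 84.0 points -> 4.5 (>= 81.0)
assert auto.grade_for_second_term == 3     # 55.0 points -> 3 (>= 51.0)
assert manual.grade_for_first_term == 3.0  # non-zero grade is left untouched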
diff --git a/backend/migrations/versions/559c8f18a125_.py b/backend/migrations/versions/5f2f440d05e2_.py
similarity index 97%
rename from backend/migrations/versions/559c8f18a125_.py
rename to backend/migrations/versions/5f2f440d05e2_.py
index 13cfba3..bae3880 100644
--- a/backend/migrations/versions/559c8f18a125_.py
+++ b/backend/migrations/versions/5f2f440d05e2_.py
@@ -1,15 +1,15 @@
 """empty message
 
-Revision ID: 559c8f18a125
+Revision ID: 5f2f440d05e2
 Revises:
-Create Date: 2023-01-14 15:25:59.137169
+Create Date: 2023-01-15 23:52:36.927007
 
 """
 import sqlalchemy as sa
 from alembic import op
 
 # revision identifiers, used by Alembic.
-revision = "559c8f18a125"
+revision = "5f2f440d05e2"
 down_revision = None
 branch_labels = None
 depends_on = None
@@ -104,8 +104,10 @@ def upgrade():
         sa.Column("tzaj_kod", sa.String(length=60), nullable=True),
         sa.Column("project_supervisor_id", sa.Integer(), nullable=True),
         sa.Column("year_group_id", sa.Integer(), nullable=True),
-        sa.Column("points_for_first_term", sa.Integer(), nullable=False),
-        sa.Column("points_for_second_term", sa.Integer(), nullable=False),
+        sa.Column("points_for_first_term", sa.Float(), nullable=False),
+        sa.Column("points_for_second_term", sa.Float(), nullable=False),
+        sa.Column("grade_for_first_term", sa.Float(), nullable=False),
+        sa.Column("grade_for_second_term", sa.Float(), nullable=False),
         sa.ForeignKeyConstraint(
             ["project_supervisor_id"],
             ["project_supervisors.id"],
diff --git a/backend/tmp_data/students.csv b/backend/tests/data/students.csv
similarity index 100%
rename from backend/tmp_data/students.csv
rename to backend/tests/data/students.csv
diff --git a/backend/tmp_data/students_column_name.csv b/backend/tests/data/students_column_name.csv
similarity index 100%
rename from backend/tmp_data/students_column_name.csv
rename to backend/tests/data/students_column_name.csv
diff --git a/backend/tmp_data/students_column_type.csv b/backend/tests/data/students_column_type.csv
similarity index 100%
rename from backend/tmp_data/students_column_type.csv
rename to backend/tests/data/students_column_type.csv
diff --git a/backend/tests/unit_tests/test_utils.py b/backend/tests/unit_tests/test_utils.py
index 709f1d9..b12aeb4 100644
--- a/backend/tests/unit_tests/test_utils.py
+++ b/backend/tests/unit_tests/test_utils.py
@@ -4,12 +4,15 @@ import pandas as pd
 import pytest
 from flask import current_app
 
+from app.base.mode import ModeGroups
 from app.base.utils import is_allowed_extensions, order_by_column_name, paginate_models
 from app.coordinator.exceptions import InvalidNameOrTypeHeaderException
 from app.coordinator.utils import (
     check_columns,
     generate_csv,
     generate_range_dates,
+    get_duration_time,
+    map_project_supervisors,
     parse_csv,
 )
 from app.dependencies import db
@@ -46,14 +49,12 @@ def test_paginate_models(test_app_ctx_with_db) -> None:
             index=123456,
             first_name="Dominic",
             last_name="Smith",
-            pesel="99010109876",
             email="xxx@gmail.com",
         )
         st1 = Student(
             index=123457,
             first_name="John",
             last_name="Newton",
-            pesel="99010109871",
             email="zzz@gmail.com",
         )
         db.session.add_all([st, st1])
@@ -72,7 +73,6 @@ def test_check_columns() -> None:
         "NAZWISKO": ["Smith"],
         "IMIE": ["Dominic"],
         "INDEKS": [343433],
-        "PESEL": [90020178654],
         "EMAIL": ["domsmi@gmail.com"],
     }
     df = pd.DataFrame(data=dummy_data)
@@ -90,7 +90,6 @@ def test_check_columns_with_invalid_column_types() -> None:
         "NAZWISKO": [999],
         "IMIE": ["Dominic"],
         "INDEKS": [343433],
-        "PESEL": [90020178654],
         "EMAIL": ["domsmi@gmail.com"],
     }
     df = pd.DataFrame(data=dummy_data)
@@ -99,13 +98,13 @@ def test_check_columns_with_invalid_column_types() -> None:
 
 def get_path_to_fake_data(filename: str) -> str:
     base_dir = current_app.config.get("BASE_DIR", "/")
-    return base_dir / "tmp_data" / filename
+    return base_dir / "tests" / "data" / filename
 
 
 def test_parse_csv(test_app) -> None:
     with test_app.app_context():
         with open(get_path_to_fake_data("students.csv")) as f:
-            students = sorted(list(parse_csv(f)), key=lambda s: s.index)
+            students = sorted(list(parse_csv(f, 1)), key=lambda s: s.index)
             indexes = [452790 + i for i in range(3)]
             assert len(students) == len(indexes)
             for st, idx in zip(students, indexes):
@@ -116,14 +115,14 @@ def test_parse_csv_with_invalid_column_header_name_in_csv_file(test_app) -> None
     with test_app.app_context():
         with open(get_path_to_fake_data("students_column_name.csv")) as f:
             with pytest.raises(InvalidNameOrTypeHeaderException):
-                parse_csv(f)
+                parse_csv(f, 1)
 
 
 def test_parse_csv_with_invalid_column_type_in_csv_file(test_app) -> None:
     with test_app.app_context():
         with open(get_path_to_fake_data("students_column_type.csv")) as f:
             with pytest.raises(InvalidNameOrTypeHeaderException):
-                parse_csv(f)
+                parse_csv(f, 1)
 
 
 def test_generate_range_dates() -> None:
@@ -139,50 +138,62 @@ def test_generate_range_dates() -> None:
         assert start_date <= date < end_date
 
 
-def test_generate_csv(test_app_ctx_with_db) -> None:
+def test_generate_csv() -> None:
     students_data = [
         {
             "first_name": "Dominic",
             "last_name": "Smith",
             "email": "xxe@gmail.com",
             "index": 123456,
-            "pesel": "98070234293",
         },
         {
             "first_name": "Matthew",
             "last_name": "Cash",
             "email": "zze@gmail.com",
             "index": 123455,
-            "pesel": "98070234291",
         },
         {
             "first_name": "Martin",
             "last_name": "Rose",
             "email": "nne@gmail.com",
             "index": 123446,
-            "pesel": "98070234223",
         },
     ]
 
-    with test_app_ctx_with_db:
-        students = [Student(**data) for data in students_data]
-        db.session.add_all(students)
-        db.session.commit()
+    students = [Student(**data) for data in students_data]
+    gr1 = Group(name="new-project")
+    gr2 = Group(name="system-pri")
+    gr1.students.append(students[0])
+    gr1.students.append(students[1])
+    gr2.students.append(students[2])
 
-        gr1 = Group(name="new-project")
-        gr2 = Group(name="system-pri")
-        gr1.students.append(students[0])
-        gr1.students.append(students[1])
-        gr2.students.append(students[2])
-        db.session.add_all([gr1, gr2])
-        db.session.commit()
+    students_and_groups = [
+        (students[0], gr1),
+        (students[1], gr1),
+        (students[2], gr2),
+    ]
+    generated_csv = generate_csv(students_and_groups)
+    for data in students_data:
+        for value in data.values():
+            assert str(value) in generated_csv
 
-        students_and_groups = [
-            (students[0], gr1),
-            (students[1], gr1),
-            (students[2], gr2),
-        ]
-        generated_csv = generate_csv(students_and_groups)
-        for data in students_data:
-            for value in data.values():
-                assert str(value) in generated_csv
+
+def test_map_project_supervisors() -> None:
+    project_supervisors_id = [(1, 2), (2, 3), (3, 7)]
+    groups = []
+    for i in range(3):
+        for _, ps_id in project_supervisors_id:
+            groups.append(Group(project_supervisor_id=ps_id))
+
+    mapped_ps = map_project_supervisors(
+        sorted(groups, key=lambda g: g.project_supervisor_id)
+    )
+    for expected_id, ps_id in project_supervisors_id:
+        assert mapped_ps[ps_id] == expected_id
+
+
+def test_get_duration_time() -> None:
+    assert get_duration_time(ModeGroups.STATIONARY.value) == 30
+    assert get_duration_time(ModeGroups.NON_STATIONARY.value) == 20
+    assert get_duration_time(ModeGroups.ENGLISH_SPEAKING_STATIONARY.value) == 30
+    assert get_duration_time("invalid value") is None
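Note (reviewer sketch, not part of the diff): map_project_supervisors assigns consecutive numbers to supervisors in order of first appearance, and that number is what now lands in the CSV's GR_NR column instead of the raw database id. The group list here is invented:

from app.coordinator.utils import map_project_supervisors
from app.students.models import Group

# Supervisors 7, 3 and 2 appear in this order, so they are numbered 1, 2, 3.
groups = [Group(project_supervisor_id=ps_id) for ps_id in (7, 3, 7, 2, 3)]
assert map_project_supervisors(groups) == {7: 1, 3: 2, 2: 3}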