diff --git a/.github/actions/setup-python-env/action.yml b/.github/actions/setup-python-env/action.yml index a6076550e..9e5e50bf9 100644 --- a/.github/actions/setup-python-env/action.yml +++ b/.github/actions/setup-python-env/action.yml @@ -17,7 +17,7 @@ runs: steps: - name: Set up Python id: setup-python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ inputs.python-version }} cache: 'pip' diff --git a/.github/actions/setup-rendering-deps/action.yml b/.github/actions/setup-rendering-deps/action.yml index 53116d996..316327d8b 100644 --- a/.github/actions/setup-rendering-deps/action.yml +++ b/.github/actions/setup-rendering-deps/action.yml @@ -20,7 +20,7 @@ runs: steps: - name: Cache fonts id: cache-fonts - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: /usr/share/fonts/truetype/humor-sans key: fonts-${{ runner.os }}-humor-sans-v1 diff --git a/.github/workflows/publish-book-v2.yml b/.github/workflows/publish-book-v2.yml new file mode 100644 index 000000000..1b3c0c20a --- /dev/null +++ b/.github/workflows/publish-book-v2.yml @@ -0,0 +1,112 @@ +name: publish-book-v2 + +on: + workflow_dispatch: + +env: + NB_KERNEL: python + ORG: neuromatch + NMA_REPO: NeuroAI_Course + NMA_MAIN_BRANCH: main + # Empty string = serve from domain root (correct for custom domain neuroai.neuromatch.io) + BASE_URL: '' + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: pages + cancel-in-progress: false + +jobs: + build-and-deploy-book-v2: + runs-on: ubuntu-latest + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Get commit message + run: | + readonly local msg=$(git log -1 --pretty=format:"%s") + echo "COMMIT_MESSAGE=$msg" >> $GITHUB_ENV + + - name: Setup Python environment + uses: ./.github/actions/setup-python-env + + - name: Setup Node.js (required by MyST theme engine) + 
uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install Jupyter Book 2 + run: pip install "jupyter-book>=2.1" + + - name: Setup CI tools + uses: ./.github/actions/setup-ci-tools + with: + commit-message: ${{ env.COMMIT_MESSAGE }} + stub-widgets: 'false' + + - name: Setup rendering dependencies + if: "!contains(env.COMMIT_MESSAGE, 'skip ci')" + uses: ./.github/actions/setup-rendering-deps + + - name: Get date for cache rotation + id: cache-date + run: echo "date=$(date +'%Y-%m')" >> $GITHUB_OUTPUT + + - name: Cache JB2 execution outputs + uses: actions/cache@v4 + with: + path: book/_build/execute + key: jb2-exec-v2-${{ steps.cache-date.outputs.date }}-${{ hashFiles('tutorials/**/*.ipynb', 'requirements.txt') }} + restore-keys: | + jb2-exec-v2-${{ steps.cache-date.outputs.date }}- + jb2-exec-v2- + + - name: Cache MyST theme (avoids re-download on every run) + uses: actions/cache@v4 + with: + path: book/_build/templates + key: jb2-templates-v1 + + - name: Create symlinks so book/ can resolve repo-root paths + run: | + ln -s ${{ github.workspace }}/tutorials book/tutorials + ln -s ${{ github.workspace }}/projects book/projects + ln -s ${{ github.workspace }}/prereqs book/prereqs + + - name: Generate myst.yml from materials.yml + run: python generate_book_v2.py student + + - name: Build book with Jupyter Book 2 + working-directory: book + run: jupyter book build --html --execute + env: + BASE_URL: ${{ env.BASE_URL }} + + - name: Strip error output divs from built HTML + run: python parse_build_for_errors_v2.py student + + - name: Copy CNAME for custom domain + run: cp CNAME book/_build/html/CNAME + + - name: Setup Pages + uses: actions/configure-pages@v4 + + - name: Upload HTML artifact + uses: actions/upload-pages-artifact@v3 + with: + path: book/_build/html + + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/publish-book.yml b/.github/workflows/publish-book.yml index 
32b8a8039..91ecf4798 100644 --- a/.github/workflows/publish-book.yml +++ b/.github/workflows/publish-book.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -61,7 +61,7 @@ jobs: run: echo "date=$(date +'%Y-%m')" >> $GITHUB_OUTPUT - name: Cache Jupyter execution - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: book/.jupyter-cache key: jupyter-exec-${{ steps.cache-date.outputs.date }}-${{ hashFiles('tutorials/**/*.ipynb', 'requirements.txt') }} diff --git a/.gitignore b/.gitignore index 52fce67de..cdc49e55d 100755 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ .vscode .idea _build -_toc.yml \ No newline at end of file +_toc.yml +book/myst.yml \ No newline at end of file diff --git a/book/_config.yml b/book/_config.yml index 451443e20..3cbf590b2 100644 --- a/book/_config.yml +++ b/book/_config.yml @@ -15,8 +15,8 @@ only_build_toc_files : true ####################################################################################### # Execution settings execute: - execute_notebooks : force # Whether to execute notebooks at build time. Must be one of ("auto", "force", "cache", "off") - cache : "" # A path to the jupyter cache that will be used to store execution artifacts. Defaults to `_build/.jupyter_cache/` + execute_notebooks : cache # Whether to execute notebooks at build time. Must be one of ("auto", "force", "cache", "off") + cache : "book/.jupyter-cache" # A path to the jupyter cache that will be used to store execution artifacts. Defaults to `_build/.jupyter_cache/` exclude_patterns : [Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb, Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb, Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb] # A list of patterns to *skip* in execution (e.g. a notebook that takes a really long time) timeout : 360 # The maximum time (in seconds) each notebook cell is allowed to run. 
run_in_temp : false # If `True`, then a temporary directory will be created and used as the command working directory (cwd), diff --git a/generate_book_v2.py b/generate_book_v2.py new file mode 100644 index 000000000..2298c8ea4 --- /dev/null +++ b/generate_book_v2.py @@ -0,0 +1,467 @@ +#!/usr/bin/env python3 +""" +Generate a JB2-compatible myst.yml from tutorials/materials.yml. +In-repo replacement for nmaci's generate_book.py during the JB2 pilot. + +tutorials/materials.yml is kept as-is: it stores richer metadata than a bare +TOC (video links, bilibili links, slide links, tutorial counts) and is used by +multiple tools. This script translates it into the myst.yml build artifact. + +Run as: python generate_book_v2.py student +""" + +import ast +import os +import re +import sys +import json +import yaml +from bs4 import BeautifulSoup + +ORG = os.environ.get("ORG", "neuromatch") +REPO = os.environ.get("NMA_REPO", "NeuroAI_Course") +ARG = sys.argv[1] # "student" or "instructor" + + +def main(): + with open("tutorials/materials.yml") as fh: + materials = yaml.load(fh, Loader=yaml.FullLoader) + + # Pre-process intro notebook + intro_path = "tutorials/intro.ipynb" + if os.path.exists(intro_path): + pre_process_notebook(intro_path) + + toc = [] + + # Root entry + toc.append({"file": "tutorials/intro.ipynb"}) + + # Schedule section + toc.append( + { + "title": "Schedule", + "children": [ + { + "file": "tutorials/Schedule/schedule_intro.md", + "short_title": "Overview", + "children": [ + {"file": "tutorials/Schedule/daily_schedules.md"}, + {"file": "tutorials/Schedule/shared_calendars.md"}, + {"file": "tutorials/Schedule/timezone_widget.md"}, + ], + } + ], + } + ) + + # Technical Help section + toc.append( + { + "title": "Technical Help", + "children": [ + { + "file": "tutorials/TechnicalHelp/tech_intro.md", + "short_title": "Overview", + "children": [ + { + "file": "tutorials/TechnicalHelp/Jupyterbook.md", + "short_title": "Using Jupyterbook", + "children": [ + 
{"file": "tutorials/TechnicalHelp/Tutorial_colab.md"}, + {"file": "tutorials/TechnicalHelp/Tutorial_kaggle.md"}, + ], + }, + {"file": "tutorials/TechnicalHelp/Discord.md"}, + ], + } + ], + } + ) + + # Links and Policy + toc.append( + { + "title": "Links & Policy", + "children": [{"file": "tutorials/TechnicalHelp/Links_Policy.md"}], + } + ) + + # Prerequisites + toc.append( + { + "title": "Prerequisites", + "children": [{"file": "prereqs/NeuroAI.md"}], + } + ) + + # Build category -> [day entries] dict (preserving materials.yml order) + categories = {} + art_file_list = os.listdir("tutorials/Art/") + + for m in materials: + category = m["category"] + if category not in categories: + categories[category] = [] + + directory = f"tutorials/{m['day']}_{''.join(m['name'].split())}" + + # Write chapter_title.md for this day (same logic as JB1 script) + title_page = f"# {m['name']}" + art_file = [fname for fname in art_file_list if m["day"] in fname] + if len(art_file) == 1: + artist = art_file[0].split("-")[1].split(".")[0].replace("_", " ") + title_page += ( + f"\n\n ````{{div}} full-width \n" + f" art relevant to chapter contents \n" + f"```` \n\n*Artwork by {artist}*" + ) + with open(f"{directory}/chapter_title.md", "w+") as fh: + fh.write(title_page) + + # Build notebook children list for this day + day_children = [] + notebook_list = [] + if os.path.exists(f"{directory}/{m['day']}_Intro.ipynb"): + notebook_list.append(f"{directory}/{ARG}/{m['day']}_Intro.ipynb") + notebook_list += [ + f"{directory}/{ARG}/{m['day']}_Tutorial{i + 1}.ipynb" + for i in range(m["tutorials"]) + ] + if os.path.exists(f"{directory}/{m['day']}_Outro.ipynb"): + notebook_list.append(f"{directory}/{ARG}/{m['day']}_Outro.ipynb") + + for nb_path in notebook_list: + day_children.append({"file": nb_path}) + pre_process_notebook(nb_path) + + day_children.append({"file": f"{directory}/further_reading.md"}) + + summary_path = f"{directory}/{ARG}/{m['day']}_DaySummary.ipynb" + if 
os.path.exists(summary_path): + day_children.append({"file": summary_path}) + pre_process_notebook(summary_path) + + categories[category].append( + { + "file": f"{directory}/chapter_title.md", + "title": f"{m['name']} ({m['day']})", + "children": day_children, + } + ) + + # Add Module WrapUps to their categories + for category, chapters in categories.items(): + safe_category = category.replace(" ", "") + wrapup_name = f"tutorials/Module_WrapUps/{safe_category}.ipynb" + if os.path.exists(wrapup_name): + chapters.append({"file": wrapup_name}) + + # Add category parts to TOC + for category, chapters in categories.items(): + toc.append({"title": category, "children": chapters}) + + # Professional Development + with open("projects/professional_development/prof_dev_materials.yml") as fh: + prof_dev_materials = convert_sections_to_children( + yaml.load(fh, Loader=yaml.FullLoader) + ) + toc.append({"title": "Professional Development", "children": prof_dev_materials}) + + # Project Booklet + with open("projects/project_materials.yml") as fh: + project_materials = convert_sections_to_children( + yaml.load(fh, Loader=yaml.FullLoader) + ) + toc.append({"title": "Project Booklet", "children": project_materials}) + + # Pre-process project notebooks + for m in project_materials: + if m.get("title") == "Project materials": + for project in m.get("children", []): + pre_process_notebook(project["file"]) + + # Write myst.yml (build artifact — not committed to git) + myst_config = { + "version": 1, + "project": { + "title": "Neuromatch Academy: NeuroAI", + "github": f"https://github.com/{ORG}/{REPO}", + "license": "CC-BY-4.0", + "edit_url": None, # disable: auto-computed URL gets book/ prefix from symlink + "toc": toc, + }, + "site": { + "template": "book-theme", + "domains": ["neuroai.neuromatch.io"], + "nav": [], + "actions": [{"title": "GitHub", "url": f"https://github.com/{ORG}/{REPO}"}], + "options": { + "logo": "tutorials/static/ai-logo.png", + "favicon": 
"tutorials/static/ai-logo.png", + "logo_text": "Neuromatch Academy: NeuroAI", + "hide_title_block": True, # notebook H1 stays in body; suppress duplicate + }, + }, + } + + with open("book/myst.yml", "w") as fh: + yaml.dump( + myst_config, + fh, + default_flow_style=False, + allow_unicode=True, + sort_keys=False, + ) + + print("Generated book/myst.yml") + + +# ---- TOC helpers ---- + + +def convert_sections_to_children(entries): + """Recursively rename JB1 'sections' keys to JB2 'children'.""" + if not entries: + return entries + result = [] + for entry in entries: + entry = dict(entry) + if "sections" in entry: + entry["children"] = convert_sections_to_children(entry.pop("sections")) + elif "children" in entry: + entry["children"] = convert_sections_to_children(entry["children"]) + result.append(entry) + return result + + +# ---- Pre-processing helpers (ported verbatim from nmaci generate_book.py) ---- + + +def pre_process_notebook(file_path): + if not os.path.exists(file_path): + print(f" Warning: {file_path} not found, skipping") + return + with open(file_path, encoding="utf-8") as fh: + content = json.load(fh) + content = open_in_colab_new_tab(content) + content = replace_widgets(content) + content = link_hidden_cells(content) + if ARG == "student": + content = tag_cells_allow_errors(content) + with open(file_path, "w", encoding="utf-8") as fh: + json.dump(content, fh, indent=1, ensure_ascii=False) + + +def replace_widgets(content): + """Replace or remove ipywidget-based cells that don't render in static HTML. + + JB2/MyST does not embed widget state, so widget cells render as "Loading..." + placeholders. This function handles three patterns: + + Video cells — detected by ``display_videos(`` + ``video_ids = [``: + Replaced with a markdown cell using MyST {tab-set}/{tab-item}/{iframe} + directives, which render natively in JB2. 
+ + Slide cells — detected by ``link_id`` + ``osf.io``: + Replaced with a markdown cell using the MyST {iframe} directive plus a + plain download link. + + Feedback cells — detected by ``# @title Submit your feedback``: + Removed entirely (pure UI widget, no static equivalent). + """ + + def make_myst_iframe(url, width="100%"): + return f"```{{iframe}} {url}\n:width: {width}\n```" + + new_cells = [] + for cell in content["cells"]: + src = "".join(cell.get("source", [])) + + if cell["cell_type"] != "code": + new_cells.append(cell) + continue + + # --- Feedback cells: remove --- + if "# @title Submit your feedback" in src: + continue + + # --- Video cells: replace with MyST tab-set + iframes --- + if "display_videos(" in src and "video_ids = [" in src: + title_match = re.search(r"#\s*@title\s+(.*)", src) + title = title_match.group(1).strip() if title_match else "Video" + + ids_match = re.search(r"video_ids\s*=\s*(\[.*?\])", src) + video_ids = [] + if ids_match: + try: + video_ids = ast.literal_eval(ids_match.group(1)) + except (ValueError, SyntaxError): + pass + + if not video_ids: + new_cells.append(cell) + continue + + tab_items = [] + for platform, vid_id in video_ids: + if platform == "Youtube": + iframe_url = f"https://www.youtube.com/embed/{vid_id}?fs=1&rel=0" + elif platform == "Bilibili": + iframe_url = f"https://player.bilibili.com/player.html?bvid={vid_id}&page=1&autoplay=0" + else: + print( + f" Warning: unknown video platform '{platform}' (id={vid_id}), skipping" + ) + continue + tab_items.append( + f":::{{tab-item}} {platform}\n{make_myst_iframe(iframe_url)}\n:::" + ) + + myst_source = "\n\n".join( + [ + f"**{title}**", + "::::{tab-set}", + "\n".join(tab_items), + "::::", + ] + ) + + new_cells.append( + { + "cell_type": "markdown", + "metadata": {}, + "source": [myst_source], + } + ) + continue + + # --- Slide cells: replace with MyST iframe + download link --- + if "link_id" in src and "osf.io" in src: + link_id_match = 
re.search(r'link_id\s*=\s*["\']([^"\']+)["\']', src) + if not link_id_match: + new_cells.append(cell) + continue + + link_id = link_id_match.group(1) + download_url = f"https://osf.io/download/{link_id}/" + render_url = ( + f"https://mfr.ca-1.osf.io/render?url=https://osf.io/{link_id}/" + f"?direct%26mode=render%26action=download%26mode=render" + ) + myst_source = "\n\n".join( + [ + f"[Download slides]({download_url})", + make_myst_iframe(render_url), + ] + ) + new_cells.append( + { + "cell_type": "markdown", + "metadata": {}, + "source": [myst_source], + } + ) + continue + + new_cells.append(cell) + + content["cells"] = new_cells + return content + + +def tag_cells_allow_errors(content): + """Add raises-exception tag to every code cell. + + JB1 used allow_errors:true globally so execution continued past any error + (NotImplementedError stubs, downstream NameErrors, etc.) and error output + divs were stripped from the HTML by parse_html_for_errors.py. + + JB2 has no global allow_errors equivalent, but raises-exception on a cell + tells MyST to continue executing subsequent cells after an error. We apply + it to all code cells so that the behaviour matches JB1 exactly. A companion + post-processing script (parse_build_for_errors_v2.py) then strips the error + output divs from the built HTML before deployment. 
+ """ + for cell in content["cells"]: + if cell["cell_type"] != "code": + continue + if "metadata" not in cell: + cell["metadata"] = {} + if "tags" not in cell["metadata"]: + cell["metadata"]["tags"] = [] + if "raises-exception" not in cell["metadata"]["tags"]: + cell["metadata"]["tags"].append("raises-exception") + return content + + +def open_in_colab_new_tab(content): + cells = content["cells"] + if not cells or not cells[0].get("source"): + return content + parsed_html = BeautifulSoup(cells[0]["source"][0], "html.parser") + for anchor in parsed_html.find_all("a"): + anchor["target"] = "_blank" + cells[0]["source"][0] = str(parsed_html) + return content + + +def link_hidden_cells(content): + cells = content["cells"] + updated_cells = cells.copy() + header_level = 1 + i_updated_cell = 0 + for i_cell, cell in enumerate(cells): + updated_cell = updated_cells[i_updated_cell] + if "source" not in cell or not cell["source"]: + i_updated_cell += 1 + continue + source = cell["source"][0] + if source.startswith("#") and cell["cell_type"] == "markdown": + header_level = source.count("#") + elif source.startswith("---") and cell["cell_type"] == "markdown": + if len(cell["source"]) > 1 and cell["source"][1].startswith("#"): + header_level = cell["source"][1].count("#") + if "@title" in source or "@markdown" in source: + if "metadata" not in cell: + updated_cell["metadata"] = {} + if "tags" not in updated_cell["metadata"]: + updated_cell["metadata"]["tags"] = [] + if "YouTubeVideo" in "".join(cell["source"]) or "IFrame" in "".join( + cell["source"] + ): + if "remove-input" not in updated_cell["metadata"]["tags"]: + updated_cell["metadata"]["tags"].append("remove-input") + else: + if "hide-input" not in updated_cell["metadata"]["tags"]: + updated_cell["metadata"]["tags"].append("hide-input") + if "@title" in source and source.split("@title")[1] != "": + header_cell = { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#" * (header_level + 1) + " " + 
source.split("@title")[1] + ], + } + updated_cells.insert(i_updated_cell, header_cell) + i_updated_cell += 1 + strings_with_markdown = [ + (i, s) for i, s in enumerate(cell["source"]) if "@markdown" in s + ] + if len(strings_with_markdown) == 1: + i, md_source = strings_with_markdown[0] + if md_source.split("@markdown")[1] != "": + header_cell = { + "cell_type": "markdown", + "metadata": {}, + "source": [md_source.split("@markdown")[1]], + } + updated_cells.insert(i_updated_cell, header_cell) + i_updated_cell += 1 + i_updated_cell += 1 + content["cells"] = updated_cells + return content + + +if __name__ == "__main__": + main() diff --git a/parse_build_for_errors_v2.py b/parse_build_for_errors_v2.py new file mode 100644 index 000000000..190ca88a2 --- /dev/null +++ b/parse_build_for_errors_v2.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Post-process JB2-built output to strip student exercise error outputs. + +JB1 equivalent: nmaci/scripts/parse_html_for_errors.py + +JB2/MyST renders pages as a React SPA. The browser ignores the static HTML and +re-renders everything from window.__remixContext, which is populated from the +per-page .json files in book/_build/html/. Stripping the static HTML alone has +no effect — we must strip the error outputs from the .json mdast trees. + +Error output structure in the page JSON (e.g. w1d2-tutorial2.json): + mdast.children[N].children[M] (type='outputs') + └── children[K] (type='output') + jupyter_data: {output_type: 'error', ename: 'NotImplementedError', ...} + +We walk every .json file, find 'output' nodes whose jupyter_data.ename matches +our error list, remove them from their parent 'outputs' node, and also remove +the 'outputs' node entirely if it becomes empty. 
+ +Run as: python parse_build_for_errors_v2.py student +""" + +import json +import os +import sys + +sys.argv[1] # "student" or "instructor" — accepted but not used (kept for compat) + +ERROR_NAMES = {"NotImplementedError", "NameError"} + +HTML_ROOT = "book/_build/html" + + +def main(): + if not os.path.isdir(HTML_ROOT): + print( + f"ERROR: HTML output directory not found: {HTML_ROOT!r} (cwd={os.getcwd()!r})" + ) + sys.exit(1) + + json_files = [] + for dirpath, _dirnames, filenames in os.walk(HTML_ROOT): + for fname in filenames: + # page data files: slug.json (not index.html) + if fname.endswith(".json") and fname != "myst.xref.json": + json_files.append(os.path.join(dirpath, fname)) + + print(f"Found {len(json_files)} page JSON files under {HTML_ROOT}") + + total_removed = 0 + files_touched = 0 + + for json_path in json_files: + with open(json_path, encoding="utf-8") as f: + try: + data = json.load(f) + except json.JSONDecodeError: + continue + + mdast = data.get("mdast") + if not mdast: + continue + + removed = strip_error_outputs(mdast) + + if removed: + total_removed += removed + files_touched += 1 + with open(json_path, "w", encoding="utf-8") as f: + json.dump(data, f, separators=(",", ":")) + print(f" Stripped {removed} error output(s) from {json_path}") + + print( + f"Done. Removed {total_removed} error output(s) from {files_touched} file(s)." + ) + + +def strip_error_outputs(node): + """Recursively walk the mdast tree and remove error output nodes. + + Targets 'outputs' nodes (type='outputs') that contain one or more + 'output' children with jupyter_data.ename in ERROR_NAMES. + + Returns count of individual error output nodes removed. 
+ """ + removed = 0 + + if not isinstance(node, dict): + return 0 + + children = node.get("children") + if isinstance(children, list): + new_children = [] + for child in children: + if isinstance(child, dict) and child.get("type") == "outputs": + # Filter out error outputs from this outputs node + kept, n = filter_error_outputs(child) + removed += n + if kept: # only keep the outputs node if it still has children + new_children.append(child) + # else: drop the now-empty outputs node entirely + else: + removed += strip_error_outputs(child) + new_children.append(child) + node["children"] = new_children + + return removed + + +def filter_error_outputs(outputs_node): + """Remove error output children from an 'outputs' node in-place. + + Returns (has_remaining_children, count_removed). + """ + removed = 0 + children = outputs_node.get("children", []) + new_children = [] + + for child in children: + if not isinstance(child, dict): + new_children.append(child) + continue + if child.get("type") != "output": + new_children.append(child) + continue + jd = child.get("jupyter_data", {}) + if ( + isinstance(jd, dict) + and jd.get("output_type") == "error" + and jd.get("ename") in ERROR_NAMES + ): + removed += 1 + else: + new_children.append(child) + + outputs_node["children"] = new_children + return bool(new_children), removed + + +if __name__ == "__main__": + main() diff --git a/tutorials/Schedule/daily_schedules.md b/tutorials/Schedule/daily_schedules.md index d1bf65d88..087736b49 100644 --- a/tutorials/Schedule/daily_schedules.md +++ b/tutorials/Schedule/daily_schedules.md @@ -42,8 +42,6 @@ All days except W1D4 and W2D5 will follow this schedule for course time: ### Project time In addition to course time, each day will have 3 hours of project time after tutorials time. -#### **Schedule Change for 2025:** For all time slots, projects will come *after* course work. - ### Schedule of specific days