#!/usr/bin/env python
# creator: Silas Jelley
# created: 2020-08-11 09:52:32
# updated: 2023-09-21 08:52:49
# version: 2.5

# Profiling
import time
import types

start = time.time()


def profbegin():
    global profstart
    profstart = time.time()


def profend(profmessage):
    profstop = time.time()
    profduration = round((profstop - profstart), 3)
    proftime = time.strftime("%H:%M:%S", time.localtime(profstop))
    print(f"{proftime} {profmessage} in {profduration} seconds")


# Imports
profbegin()

# Standard
import os, re, sys, random, json, pickle, datetime
from subprocess import run, CompletedProcess
from hashlib import md5
from pathlib import Path
from shutil import copyfile, rmtree

# Dependencies:
# For fonts (external): fonttools, brotli

# Imports
import frontmatter
from jinja2 import Environment, FileSystemLoader
from PIL import Image, ImageOps
from pillow_heif import register_heif_opener

register_heif_opener()

# Site details
site = {
    "name": "Silas Jelley",
    "created": "2020-08-20",
    "url": "https://silasjelley.com/",
    "baseurl": "silasjelley.com",
    "uid": "d0b81a46-733e-44b1-8181-61cec5681440",
    "description": "An unremarkable man in a breathtaking world",
    "creator": {
        "name": "Silas Jelley",
        "email": "reply@silasjelley.com",
        "born": "1996-06-09",
    },
    "backlinks": 0,
    "wordcount": 0,
    "wordcountWithoutReferences": 0,
    "pagecount": 0,
    "references": 0,
    "primaries": [],
    "secondaries": [],
    "tags": [],
    "data": {},
}

# Variables
# Source paths
assetsDir = "assets/"
feedTemplate = "feed"
feedXlsTemplate = "feed-xsl"
sitemapTemplate = "sitemap"
defaultTemplate = "default"
templateDir = "assets/templates/"
imagesDir = "assets/images/"
stylesheet = "assets/styles.css"
buildDepth = "production"
draftsFile = "drafts/list"

# Outputs
buildType = os.getenv("SITE_BUILD_TYPE", "normal")
if buildType == "drafts":
    outputDir = "/tmp/drafts.silasjelley.com/"
else:
    outputDir = "/tmp/silasjelley.com/"

# Hash stylesheet for cachebusting
with open(stylesheet, "rb") as fileToHash:
    # read the contents of the file, then hash them with md5
    filedata = fileToHash.read()
    site["stylesheet_hash"] = md5(filedata).hexdigest()

assets = {}
assetManifests = list(Path("/home/silas/notes/assets/manifests").glob("**/*.json"))
for manifest in assetManifests:
    assets.update(json.load(open(manifest)))

interlinkCount = 0

# OS Environment
try:
    verbosity = int(os.environ["SITE_BUILD_VERBOSITY"])
except KeyError:
    verbosity = 1

# Read BUILD and CONTENT directories from environment, change working dir
journalDirectory = os.environ["JOURNAL_DIR"]
notesDirectory = os.environ["NOTES_DIR"]
buildDirectory = os.environ["SITE_BUILD_DIR"]
startDirectory = os.getcwd()
os.chdir(buildDirectory)

# Jinja Environment
file_loader = FileSystemLoader(templateDir)
env = Environment(loader=file_loader)


def filter_hash(s):
    # NOTE: the body of this filter was lost when the file was mangled.
    # Returning an md5 hex digest of the value is an assumption based on the
    # filter's name and the md5 import above.
    return md5(str(s).encode()).hexdigest()

env.filters["hash"] = filter_hash


def filter_shuffle(seq):
    try:
        result = list(seq)
        random.shuffle(result)
        return result
    except TypeError:
        return seq

env.filters["shuffle"] = filter_shuffle


def time_local(value, format="%-I:%M%p"):
    return value.strftime(format).lower()

env.filters["time_local"] = time_local


def date_year(value, format="%Y"):
    return value.strftime(format)

env.filters["year"] = date_year


def date_month(value, format="%m"):
    return value.strftime(format)

env.filters["month"] = date_month


def date_day(value, format="%d"):
    return value.strftime(format)

env.filters["day"] = date_day


def date_year_month(value, format="%Y/%m"):
    return value.strftime(format)

env.filters["year_month"] = date_year_month


def date_year_month_day(value, format="%Y/%m/%d"):
    return value.strftime(format)

env.filters["year_month_day"] = date_year_month_day


def date_long_short_month(value, format="%b %e, %Y"):
    return value.strftime(format)

env.filters["date_long_short_month"] = date_long_short_month


def datetime_w3c(value, format="%Y-%m-%dT%H:%M:%S"):
    return value.strftime(format)

env.filters["datetime_w3c"] = datetime_w3c


def date_long_full_month(value, format="%B %e, %Y"):
    return value.strftime(format)

env.filters["date_long_full_month"] = date_long_full_month


def timedate_long(value, format="%-I:%M%p %B %e, %Y"):
    return value.strftime(format)

env.filters["timedate_long"] = timedate_long
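
# The filters above are only used from the Jinja templates. A minimal usage
# sketch (the template markup here is assumed, not taken from the real
# templates):
#
#   <time datetime="{{ page.available | datetime_w3c }}">
#     {{ page.available | date_long_full_month }} at {{ page.available | time_local }}
#   </time>
#
# i.e. a datetime such as 2023-09-15 19:55:46 renders as "2023-09-15T19:55:46",
# "September 15, 2023", and "7:55pm" respectively.
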
def getFiles():
    fileList = []
    matches = 0
    if buildType == "drafts":
        with open(draftsFile) as draftsList:
            checkList = draftsList.read()
            fileList = checkList.splitlines()
            print(fileList)
    else:
        checkList = list(Path(notesDirectory).glob("**/*.md"))
        for filename in checkList:
            availableMatch = ""
            with open(filename, "r") as inputfile:
                filedata = inputfile.read()
                availableMatch = re.search(
                    "^available:", filedata, flags=re.MULTILINE
                )
            if availableMatch:
                fileList.append(filename)
    return fileList


def processImage(inputImage, outputWidth):
    im = ImageOps.exif_transpose(Image.open(inputImage))
    outputHeight = int(im.size[1] * float((outputWidth / float(im.size[0]))))
    outputImage = im.resize((outputWidth, outputHeight), Image.Resampling.LANCZOS)
    return {"image": outputImage, "height": outputHeight, "width": outputWidth}
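
# The asset manifests loaded near the top of this file map an asset identifier
# to its metadata. A hedged sketch of what a single entry is assumed to look
# like, inferred from the keys read in processAssets() below (the identifier
# and values are invented for illustration):
#
#   {
#     "9f3c2b1a-0000-0000-0000-000000000000": {
#       "type": "image",
#       "class": "photograph",
#       "name": "example",
#       "source": "/home/silas/notes/assets/images/example.jpg",
#       "slug": "images/example.jpg"
#     }
#   }
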
def processAssets():
    # Process Asset Manifest
    for assetIdentifier, assetMetadata in assets.items():
        try:
            sourcePath = assetMetadata["source"]
            with open(sourcePath) as f:
                pass
        except FileNotFoundError:
            raise Exception(
                f"Missing asset.\nThe following asset referenced in the asset manifest could not be found:\n  {assetIdentifier}\n  {assetMetadata['source']}"
            )
        outputPath = os.path.join(outputDir + assetMetadata["slug"])
        if assetMetadata["type"] == "data":
            site["data"][assetMetadata["name"]] = json.load(
                open(assetMetadata["source"])
            )
        elif assetMetadata["type"] == "image":
            if assetMetadata["class"] in ["photograph", "art", "screen"]:
                ## if image is in HEIC format, force to save as JPEG, else preserve format
                # if outputPath.endswith(".heic"):
                #     outputPath = outputPath.replace(".heic", ".jpg")
                # if image already exists in outputDir, skip
                try:
                    with open(outputPath) as f:
                        pass
                # if image does not yet exist in outputDir, process and output
                except IOError:
                    if verbosity > 2:
                        print(f"{assetMetadata}: {outputPath}")
                    sourcePath = assetMetadata["source"]
                    os.makedirs(os.path.dirname(outputPath), exist_ok=True)
                    if "PANO" in outputPath:
                        compressedImage = processImage(sourcePath, 3000)
                    else:
                        compressedImage = processImage(sourcePath, 1200)
                    compressedImage["image"].save(outputPath)
                # thumbnailPath = os.path.join(
                #     outputDir + "thumbnails/" + assetMetadata["slug"]
                # )
                # try:
                #     with open(thumbnailPath) as f:
                #         pass
                # # repeat for creating thumbnails
                # except IOError:
                #     sourcePath = assetMetadata["source"]
                #     os.makedirs(os.path.dirname(thumbnailPath), exist_ok=True)
                #     thumbnailImage = processImage(sourcePath, 400)
                #     thumbnailImage["image"].save(thumbnailPath)
            # All other files in the manifest are (currently) copied straight through
            else:
                # if file already exists in outputDir, skip
                try:
                    with open(outputPath) as f:
                        pass
                # If file does not yet exist in outputDir, process and output
                except IOError:
                    source = assetMetadata["source"]
                    if verbosity > 2:
                        print(f"  {source} >> ", end="", flush=True)
                    os.makedirs(os.path.dirname(outputPath), exist_ok=True)
                    copyfile(source, outputPath)
                    if verbosity > 2:
                        print(outputPath)
        # Special handling of GPX files so that they don't go stale, needs reworking
        elif assetMetadata["type"] == "gpx":
            with open(assetMetadata["source"], "rb") as fileToHash:
                filedata = fileToHash.read()
                assets[assetIdentifier]["hash"] = md5(filedata).hexdigest()
            source = assetMetadata["source"]
            if verbosity > 2:
                print(f"  {source} >> ", end="", flush=True)
            os.makedirs(os.path.dirname(outputPath), exist_ok=True)
            copyfile(source, outputPath)
            if verbosity > 2:
                print(outputPath)
        else:
            # Copy all non 'image' assets straight through
            sourcePath = assetMetadata["source"]
            outputPath = os.path.join(outputDir + assetMetadata["slug"])
            if verbosity > 2:
                print(f"  {sourcePath} >> ", end="", flush=True)
            os.makedirs(os.path.dirname(outputPath), exist_ok=True)
            copyfile(sourcePath, outputPath)
            if verbosity > 2:
                print(outputPath)

    # Process image assets outside the manifest
    imageAssets = (f for f in Path(imagesDir).glob("**/*") if f.is_file())
    for image in imageAssets:
        if str(image).endswith((".jpg", ".png", ".webp")):
            slug = os.path.relpath(image, "assets/images")
            outputPath = os.path.join(outputDir + slug)
            # if image already exists in outputDir, skip
            try:
                with open(outputPath) as f:
                    pass
            # If image does not yet exist in outputDir, process and output
            except IOError:
                if verbosity > 2:
                    print(f"{image}: {outputPath}")
                os.makedirs(os.path.dirname(outputPath), exist_ok=True)
                compressedImage = processImage(image, 1000)
                compressedImage["image"].save(outputPath)
            # # repeat for creating thumbnails
            # thumbnailPath = os.path.join(outputDir + "thumbnails/" + slug)
            # try:
            #     with open(thumbnailPath) as f:
            #         pass
            # except IOError:
            #     os.makedirs(os.path.dirname(thumbnailPath), exist_ok=True)
            #     thumbnailImage = processImage(image, 400)
            #     thumbnailImage["image"].save(thumbnailPath)
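
# Each note is a Markdown/Djot file with YAML front matter. A hedged sketch of
# the fields that preprocessMetadata() and ingestDocuments() below rely on
# (values invented for illustration; only files carrying an 'available' field
# are picked up at all, see getFiles() above):
#
#   uid: 74534cff-0000-0000-0000-000000000000
#   slug: essays/example
#   title: An example page
#   primary: essays
#   secondary: writing
#   tags: [example]
#   created:
#     date: 2023-09-15 19:55:46
#     location: Livno, Bosnia
#   available:
#     date: 2023-09-16 08:00:00
#   options: []
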
def preprocessMetadata(filename, metadata):
    metadata["filename"] = filename
    if "uid" not in metadata:
        raise Exception(f"[ERROR] Document missing UID\n  {filename}")
    if "slug" not in metadata:
        raise Exception(f"[ERROR] Slug not set\n  {filename}")
    if "created" not in metadata:
        raise Exception(f"[ERROR] Document missing creation date\n  {filename}")
    if "updated" not in metadata:
        metadata["updated"] = metadata["created"]

    # Below is a temporary workaround while I transition to recording
    # creation/available dates as structured mappings rather than simple strings.
    # The previous form `created: 2023-09-15 19:55:46` will become the
    # following:
    #   created:
    #     date: 2023-09-15 19:55:46
    #     location: Livno, Bosnia
    #     latitude: 44.30027181
    #     longitude: 15.85413135
    #     altitude: 605.1341
    if buildType != "drafts":
        if not isinstance(metadata["available"], datetime.date):
            metadata["location"] = metadata["available"].get(
                "location", metadata["created"]["location"]
            )
            metadata["available"] = metadata["available"]["date"]
            metadata["created"] = metadata["created"]["date"]
        if not isinstance(metadata["updated"], datetime.date):
            metadata["updated"] = metadata["available"]

    if metadata.get("source"):
        sourceCreator = metadata["source"].get("creator")
        sourceTitle = metadata["source"].get("title")
        # Date variable is reassigned if the more specific 'created' is available
        sourceDate = metadata["source"].get("year")
        sourceDate = metadata["source"].get("created", sourceDate)
        sourcePage = metadata["source"].get("page")
        sourceUrl = metadata["source"].get("url")
        if sourceUrl is not None:
            # closing parentheses are replaced with their encoded form so that
            # URLs that include parentheses don't break the markdown/djot
            sourceUrl = sourceUrl.replace(")", "%29")
        attrRich = ""
        attrPlain = ""
        if sourceCreator is not None:
            attrRich = attrPlain = sourceCreator
            if sourceTitle is not None and sourceUrl is not None:
                attrRich = f"{sourceCreator}, [{sourceTitle}]({sourceUrl})"
                attrPlain = f"{sourceCreator}, {sourceTitle}"
            elif sourceTitle is not None and sourceUrl is None:
                attrRich = attrPlain = f"{sourceCreator}, {sourceTitle}"
            elif sourceTitle is None and sourceUrl is not None:
                attrRich = f"[{sourceCreator}]({sourceUrl})"
        elif sourceTitle is not None:
            attrRich = attrPlain = sourceTitle
            if sourceTitle is not None and sourceUrl is not None:
                attrRich = f"[{sourceTitle}]({sourceUrl})"
        elif sourceTitle is None and sourceUrl is not None:
            attrRich = f"[link]({sourceUrl})"
        if sourcePage is not None:
            attrRich += f", Page {sourcePage}"
        if sourceDate is not None:
            if isinstance(sourceDate, datetime.date):
                sourceDate = sourceDate.year
            attrRich += f", {sourceDate}"
        # Add via link only after title has been altered
        if metadata.get("via") and metadata["via"].get("url"):
            viaUrl = metadata["via"]["url"]
            viaUrl = viaUrl.replace(")", "%29")
            attrRich += f" ([via]({viaUrl}))"
        attrRich = f"— {attrRich}"
        attrRich = run("jotdown", input=attrRich, text=True, capture_output=True).stdout
        metadata["attrRich"] = attrRich
        metadata["attrPlain"] = attrPlain

    if metadata.get("title") is None:
        try:
            metadata["title"] = attrPlain
        except UnboundLocalError:
            metadata["title"] = (
                metadata["created"].strftime("%B %e, %Y %-I.%M")
                + metadata["created"].strftime("%p").lower()
            )

    # ensure slug and title are stored as strings even if dates
    metadata["title"] = str(metadata["title"])
    metadata["slug"] = str(metadata["slug"])

    # create empty lists to be populated with 'interlinks' and 'backlinks'
    metadata["interlinks"] = []
    metadata["backlinks"] = []
    return metadata
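
# A worked sketch of the attribution built above (hedged; the names are
# invented and the jotdown-rendered markup is not shown). Given front matter
# such as:
#
#   source:
#     creator: Jane Doe
#     title: An Example Book
#     year: 1974
#     page: 33
#
# attrPlain becomes "Jane Doe, An Example Book" and attrRich is assembled as
# "— Jane Doe, An Example Book, Page 33, 1974" before being rendered to HTML
# by the external `jotdown` command.
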
def ingestDocuments():
    if verbosity > 1:
        print("Ingesting files ")
    fileList = getFiles()
    global documents
    global site
    documents = {}
    slugToTitleLookup = {}
    slugToUidLookup = {}
    siteSecondaries = []
    sitePrimaries = []
    siteSeries = []
    global tags
    tags = []
    global searchIndex
    searchIndex = []
    uuidCollisionLookup = []
    for filename in fileList:
        site["pagecount"] += 1
        if verbosity > 2:
            print(f"  {filename}")
        with open(filename) as f:
            rawMetadata, content = frontmatter.parse(f.read())
        metadata = preprocessMetadata(filename, rawMetadata)
        # insert wordcount into metadata and add to site wordcount
        metadata["wordcount"] = len(content.split())
        site["wordcount"] += metadata["wordcount"]
        if metadata["primary"] != "references":
            site["wordcountWithoutReferences"] += metadata["wordcount"]
        combined = metadata
        combined["content"] = content
        # Add each file's content and metadata to global 'documents'
        documents[metadata["uid"]] = combined
        # Update global dictionary of internal links (part of backlinking strategy)
        slugToTitleLookup[metadata["slug"]] = metadata["title"]
        slugToUidLookup[metadata["slug"]] = metadata["uid"]
        # Append primaries and secondaries to globals
        sitePrimaries.append(metadata["primary"])
        siteSecondaries.append(metadata["secondary"])
        try:
            siteSeries.append(metadata["series"])
        except KeyError:
            pass
        try:
            for tag in metadata["tags"]:
                tags.append(tag)
        except KeyError:
            pass
        except TypeError:
            pass
        # Append to search index if not marked 'nofeed'
        options = metadata.get("options")
        if options is not None and "nofeed" in options:
            continue
        else:
            searchIndex.append(
                {
                    "title": metadata["title"],
                    "slug": metadata["slug"],
                    "content": content,
                    "primary": metadata["primary"],
                    "secondary": metadata["secondary"],
                }
            )
        uuidCollisionLookup.append(metadata["uid"])
    site["slugToUidLookup"] = slugToUidLookup
    site["slugToTitleLookup"] = slugToTitleLookup
    # Check for UUID collisions
    # More specifically, check for collisions using only the first 8 digits of
    # each UUID; this ensures that the short prefix is enough to uniquely
    # identify each document and can be used widely for referencing.
    if verbosity > 2:
        print("Checking for collisions")
    prefixList = [key[0:8] for key in uuidCollisionLookup]
    if not len(set(prefixList)) == len(prefixList):
        sys.exit("CRITICAL ERROR: UUID prefix collision")
    # Dedupe global primaries/secondaries/tags lists
    sitePrimaries = list(dict.fromkeys(sitePrimaries))
    siteSecondaries = list(dict.fromkeys(siteSecondaries))
    tags = list(dict.fromkeys(tags))
    # Insert primaries, secondaries, and tags into site metadata
    site["primaries"] = sitePrimaries
    site["secondaries"] = siteSecondaries
    site["tags"] = tags
    # Transform search index into valid JSON
    searchIndex = json.dumps(searchIndex)
    if verbosity > 1:
        sitePagecount = site["pagecount"]
        print(f"  Ingested {sitePagecount} files")
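
# insertSubstitutions() rewrites the internal reference shorthand used in the
# notes into plain links before the Djot/Markdown is rendered. A hedged sketch
# of the transformations (short ids and slugs invented for illustration):
#
#   "slug::74534cff"               ->  "/essays/example"
#   "[](link::74534cff)"           ->  "[An example page](/essays/example)"
#   "[read this](link::74534cff)"  ->  "[read this](/essays/example)"
#   "[](quote::74534cff)"          ->  a blockquote of the quoted document,
#                                      attributed to its source creator
#   "[](in::74534cff)"             ->  the referenced document transcluded inline
#
# The exact replacement text depends on the matched document's metadata, as
# implemented below.
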
def insertSubstitutions():
    if verbosity > 1:
        print("Performing substitutions")
    transclusionList = []
    REF_LINK_RE = re.compile(r"\[([^\]]*?)\]\((.*?::)([^)]+)\)")
    # NOTE: the original REF_SLUG_RE pattern, the lookup table joining
    # documents and assets, and the loop header were lost when this file was
    # mangled; the three lines below are a hedged reconstruction based on how
    # they are used further down.
    REF_SLUG_RE = re.compile(r"(slug::)([0-9a-f]{8})")
    mergedData = {**documents, **assets}
    for key, page in documents.items():
        if verbosity > 2:
            print(f"  {key, page['title']}")
        text = page["content"]
        # Replace instances of site::pagecount and site::wordcount accordingly.
        # This is a bodge.
        text = text.replace("site::pagecount", str(site["pagecount"]))
        text = text.replace("site::wordcount", "{:,}".format(site["wordcount"]))
        # Replace UUID document references in the source text
        # See "SEP: Reference documents by their URN/UUID" for the rationale
        # Find all valid UUID reference links, eg:
        #   [](slug::74534cff)
        #   [:Replace text](slug::74534cff)
        # REF_LINK_RE = re.compile(r"\[([^\]]*)\]\((![^)]+)\)")
        refSlugs = list(REF_SLUG_RE.findall(text))
        for refType, refShortId in refSlugs:
            match = f"{refType}{refShortId}"
            refId = ""
            for k in mergedData:
                if k.startswith(refShortId):
                    refId = k
            refSlug = f"/{mergedData[refId]['slug']}"
            replacement = refSlug
            text = text.replace(match, replacement)
        refLinks = list(REF_LINK_RE.findall(text))
        site["references"] += len(refLinks)
        # Process each match
        for refTextMatch, refType, refShortId in refLinks:
            match = f"[{refTextMatch}]({refType}{refShortId})"
            if refType not in [
                "link::",
                "in::",
                "sin::",
                "img::",
                "video::",
                "quote::",
            ]:
                raise Exception(
                    f"Unexpected Internal Reference type '{refType}' in document:\n"
                    f"  {key}\n  match: {match}"
                )
            else:
                refId = ""
                for k in mergedData:
                    if k.startswith(refShortId):
                        refId = k
                # If refText is blank OR begins with a '::', look up the title
                # of the linked document, else pass through.
                if refTextMatch.startswith("::") or refTextMatch == "":
                    try:
                        refText = mergedData[refId]["title"]
                    except KeyError:
                        refText = mergedData[refId]["description"]
                else:
                    refText = refTextMatch
                # Lookup slug of linked document
                refSlug = f"/{mergedData[refId]['slug']}"
                if refType in ["link::", "img::"]:
                    replacement = f"[{refText}]({refSlug})"
                    text = text.replace(match, replacement)
                elif refType in ["quote::"]:
                    refSrc = mergedData[refId]["source"]["creator"]
                    refText = mergedData[refId]["content"].replace("\n\n", "\n>\n> ")
                    replacement = f"> {refText}\\\n> — [{refSrc}]({refSlug})"
                    text = text.replace(match, replacement)
                elif refType in ["in::", "sin::"]:
                    transcludePayload = {
                        "refType": refType,
                        "insrc_uuid": key,
                        "insrc_match": match,
                        "refId": refId,
                        "inref_linktext": refText,
                        "inref_linktarget": refSlug,
                    }
                    transclusionList.append(transcludePayload)
                elif refType == "video::":
                    # Assemble payload
                    # NOTE: the original inline video markup was lost when this
                    # file was mangled; the <video> element below is a hedged
                    # reconstruction.
                    replacement = (
                        f'``` =html\n<video controls src="{refSlug}"></video>\n```'
                    )
                    # Find and replace the original link with the updated one
                    text = text.replace(match, replacement)
                # If no matching UUID found for reference, raise Exception
                if refId == "":
                    raise Exception(
                        f"\nUnmatched UUID reference:\n"
                        f"  document: {key}\n"
                        f"  {refShortId} does not reference an existing document"
                    )
        # Write modified plaintext back to document variable once all
        # substitutions have been carried out.
        page["content"] = text

    # Carry out transclusions (these must occur after all other link references
    # to ensure such links are present in transcluded elements)
    for transclude in transclusionList:
        insrc_uuid = transclude["insrc_uuid"]
        insrc_match = transclude["insrc_match"]
        refId = transclude["refId"]
        inref_transclude = run(
            "jotdown",
            input=documents[refId]["content"],
            text=True,
            capture_output=True,
        ).stdout
        refText = transclude["inref_linktext"]
        refSlug = transclude["inref_linktarget"]
        # Assemble payload
        if transclude["refType"] == "in::":
            # NOTE: the HTML wrapper markup inside these two f-strings was lost
            # when this file was mangled; the <div>/<blockquote> structure below
            # is a hedged reconstruction that preserves the surviving pieces
            # (the transcluded body and the "from {refText}" attribution link).
            replacement = (
                f'``` =html\n<div class="transclusion">\n<blockquote>\n'
                f"{inref_transclude}\n</blockquote>\n"
                f'<p>from <a href="{refSlug}">{refText}</a></p>\n</div>\n```'
            )
        else:
            replacement = (
                f'``` =html\n<div class="transclusion">\n'
                f"{inref_transclude}\n</div>\n```"
            )
        # Find and replace the original link with the assembled transclusion
        documents[insrc_uuid]["content"] = documents[insrc_uuid]["content"].replace(
            insrc_match, replacement
        )


def generateHTML():
    if verbosity > 1:
        print("Generating HTML")
    for key, page in documents.items():
        # Convert lightweight markup to HTML
        parsed = run("jotdown", input=page["content"], text=True, capture_output=True)
        html = parsed.stdout
        documents[key]["html"] = html
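
# generateHTML() and the attribution/transclusion code above shell out to the
# external `jotdown` command, assumed here to read Djot markup on stdin and
# write HTML to stdout (which is how the run(...) calls treat it). A hedged
# sketch of the round trip:
#
#   run("jotdown", input="_hello_ [world](/slug)", text=True, capture_output=True).stdout
#   # -> '<p><em>hello</em> <a href="/slug">world</a></p>\n'  (assumed output)
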
def buildBacklinks():
    # `interlinks` is a metadata element listing the UIDs of all the documents
    # that the document points OUT to.
    # `backlinks` is a metadata element listing the UIDs of the documents that
    # point IN TO the document.
    if verbosity > 1:
        print("Building backlinks ")
    global interlinkCount
    interlinkCount = 0
    INLINE_LINK_RE = re.compile(
        r"\[[^\]]*(?:\[[^\]]*\][^\]]*)*\]\(\/([^)]*)\)", re.DOTALL
    )
    FOOTNOTE_LINK_URL_RE = re.compile(r"\[.+?\]:\s\/(.*)", re.DOTALL)
    for key, page in documents.items():
        if "nobacklinks" in page.get("options", ""):
            continue
        else:
            if verbosity > 2:
                print(page["title"])
            text = page["content"]
            interlinks = []
            # locate all internal references between documents.
            inlineRefs = list(INLINE_LINK_RE.findall(text))
            footnoteRefs = list(FOOTNOTE_LINK_URL_RE.findall(text))
            combinedRefs = inlineRefs + footnoteRefs
            # For each internal link, use its slug to look up the UUID of the
            # document it points to, and add that to a list called 'interlinks'
            for slug in combinedRefs:
                try:
                    link_uid = site["slugToUidLookup"][slug]
                    interlinks.append(link_uid)
                    interlinkCount += 1
                except KeyError:
                    # Ignore errors from slugs that: point to feeds and images,
                    # start with '$', or appear in the exclude list
                    if (
                        slug.startswith(("feeds/", "images/", "$"))
                        or slug.endswith(
                            (
                                ".jpg",
                                ".webp",
                                ".png",
                                ".svg",
                                ".pdf",
                                ".gif",
                            )
                        )
                        or slug in ["publickey", "humans.txt", "build.py"]
                    ):
                        continue
                    # If none of the above catch the error, print the title,
                    # uuid, and slug to stdout but continue with the build.
                    else:
                        print()
                        print(page["title"], key)
                        print(f"KeyError: {slug}")
                        continue
            # Deduplicate interlinks and store
            interlinks = list(dict.fromkeys(interlinks))
            documents[key]["interlinks"] = interlinks
            # Insert document key into backlinks of referenced documents
            for interlink_key in interlinks:
                documents[interlink_key]["backlinks"].append(key)
            site["backlinks"] += len(interlinks)


def buildCollections():
    global collections
    collections = {}
    global sitemap
    sitemap = []
    ## Need to build in a check to make sure there are no collisions between
    ## primaries/collections/tags
    for primary in site["primaries"]:
        collections.update({primary: []})
    for secondary in site["secondaries"]:
        collections.update({secondary: []})
    # Create empty collection for each tag
    for tag in tags:
        collections.update({tag: []})
    # Create empty 'everything' collection
    collections.update({"everything": []})
    # Add each page to collections for each of its tags unless 'nofeed' is found
    # in metadata['options'] (though even those will appear in the sitemap variable).
    # Only 'drafts' are omitted from the sitemap.
    for key, page in sorted(
        documents.items(), key=lambda k_v: k_v[1]["available"], reverse=True
    ):
        options = page.get("options")
        if options is not None and "nofeed" in options:
            sitemap.append(page)
            continue
        elif page["slug"].startswith("drafts"):
            continue
        else:
            sitemap.append(page)
            collections["everything"].append(page)
            collections[page["primary"]].append(page)
            collections[page["secondary"]].append(page)
            try:
                for tag in page["tags"]:
                    collections[tag].append(page)
            except KeyError:
                pass
            except TypeError:
                pass


def outputHTML():
    if verbosity > 1:
        print("Generating Hypertext")
    for key, page in documents.items():
        collection = []
        if verbosity > 2:
            print(f"  {page['filename']} >> ", end="", flush=True)
        templateFile = page.get("layout", defaultTemplate)
        template = env.get_template(templateFile)
        try:
            for include in page["collection"]["include"]:
                collection.append(collections[include])
            # If a page has multiple tags in its collection, sort chronologically
            if len(page["collection"]["include"]) > 0:
                collection = [item for sublist in collection for item in sublist]
                collection.sort(key=lambda x: x["available"], reverse=True)
        except KeyError:
            pass
        # Generate the static page
        output = template.render(
            documents=documents,
            assets=assets,
            collections=collections,
            collection=collection,
            page=page,
            site=site,
            searchIndex=searchIndex,
        )
        # Ready output path
        outputPath = f"{outputDir + page['slug']}/index.html"
        # Create output directory tree
        os.makedirs(os.path.dirname(outputPath), exist_ok=True)
        # Write final html
        with open(outputPath, "w") as f:
            f.write(output)
        if verbosity > 2:
            print(f"{outputPath}")
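
# A page pulls a collection of other pages into its template via front matter
# assumed to look like this (shape inferred from the page["collection"]["include"]
# lookups in outputHTML() above; the layout name and collection names are
# invented):
#
#   layout: listing
#   collection:
#     include:
#       - essays
#       - photography
#
# renderFeed()/outputFeeds() below then emit one XML feed per primary and
# secondary collection, plus an 'everything' feed, using the 'feed' template.
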
def renderFeed(feedName):
    slug = os.path.join("feeds", feedName)
    feedPath = os.path.join(outputDir, slug, "index.xml")
    template = env.get_template(feedTemplate)
    feedContent = template.render(
        site=site,
        slug=slug,
        collection=feedName,
        feed=collections[feedName],
    )
    feed = {"name": feedName, "output": feedContent, "path": feedPath}
    return feed


def outputFeeds():
    if verbosity > 1:
        print("Generating Feeds")
    feedList = site["primaries"] + site["secondaries"] + ["everything"]
    for entry in feedList:
        if verbosity > 2:
            print(f"  {entry} >> ", end="", flush=True)
        feed = renderFeed(entry)
        os.makedirs(os.path.dirname(feed["path"]), exist_ok=True)
        with open(feed["path"], "w") as f:
            f.write(feed["output"])
        if verbosity > 2:
            print(feed["path"])
    # Render feed stylesheet (XSL)
    if verbosity > 1:
        print("Creating XSL Stylesheet for Feeds")
    template = env.get_template(feedXlsTemplate)
    outputPath = os.path.join(outputDir, "feed.xsl")
    os.makedirs(os.path.dirname(outputPath), exist_ok=True)
    output = template.render(site=site)
    with open(outputPath, "w") as f:
        f.write(output)
    if verbosity > 2:
        print(f"{outputPath}")


def outputSitemap():
    if verbosity > 1:
        print("Generating Sitemap")
    template = env.get_template(sitemapTemplate)
    output = template.render(sitemap=sitemap, site=site)
    outputPath = outputDir + "sitemap.xml"
    with open(outputPath, "w") as f:
        f.write(output)
    if verbosity > 2:
        print(f"  {outputPath}")


def copyAssets():
    # Copy assets (excluding images, including SVGs)
    if verbosity > 1:
        print("Copying Assets")
    assetFiles = (f for f in Path(assetsDir).glob("**/*") if f.is_file())
    for item in assetFiles:
        # Copy SVGs, GIFs, and MP4s to output dir.
        if str(item).endswith((".svg", ".mp4", ".gif")):
            if verbosity > 2:
                print(f"  {item} >> ", end="", flush=True)
            outputPath = outputDir + ("/".join(Path(item).parts[1:]))
            os.makedirs(os.path.dirname(outputPath), exist_ok=True)
            copyfile(item, outputPath)
            if verbosity > 2:
                print(outputPath)
        elif "/templates/" not in str(item) and "/images/" not in str(item):
            if verbosity > 2:
                print(f"  {item} >> ", end="", flush=True)
            outputPath = outputDir + ("/".join(Path(item).parts[1:]))
            # Create output directory
            os.makedirs(os.path.dirname(outputPath), exist_ok=True)
            copyfile(item, outputPath)
            if verbosity > 2:
                print(outputPath)


profend("Imports, setup, and definitions")

# Run
if buildDepth in ["normal", "production"]:
    profbegin()
    processAssets()
    profend("Processed asset manifest")
    profbegin()
    ingestDocuments()
    profend("\033[1mIngested documents\033[0m")
    profbegin()
    insertSubstitutions()
    refCount = site["references"]
    profend(f"Inserted {refCount} substitutions")
    profbegin()
    generateHTML()
    profend("\033[1mGenerated HTML\033[0m")
    profbegin()
    buildBacklinks()
    profend(f"Built {interlinkCount} backlinks")
    profbegin()
    buildCollections()
    profend("Built collections")
    profbegin()
    outputHTML()
    profend("Wrote out HTML files")
    profbegin()
    outputFeeds()
    profend("Built feeds")
    profbegin()
    outputSitemap()
    profend("Built sitemap")
    profbegin()
    copyAssets()
    profend("Copied assets")
else:
    profbegin()
    ingestDocuments()
    profend("\033[1mIngested documents\033[0m")
    profbegin()
    buildCollections()
    profend("Built collections")
    profbegin()
    generateHTML()
    profend("\033[1mGenerated HTML\033[0m")
    profbegin()
    outputHTML()
    profend("Wrote out HTML files")

# Profiling
# Change back to the directory the user was in prior to the build
os.chdir(startDirectory)
end = time.time()
duration = round((end - start), 3)
buildtime = time.strftime("%H:%M:%S", time.localtime(end))
pagecount = site["pagecount"]
if verbosity > 0:
    print(f"{buildtime} Built {pagecount} pages in {duration} seconds")
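
# Invocation sketch (hedged): this script is driven entirely by the environment
# variables read near the top of the file. Something along these lines is
# assumed, with placeholder paths standing in for the real ones:
#
#   NOTES_DIR=~/notes JOURNAL_DIR=~/journal SITE_BUILD_DIR=~/site \
#   SITE_BUILD_TYPE=normal SITE_BUILD_VERBOSITY=2 python build.py
#
# SITE_BUILD_TYPE=drafts builds the files listed in drafts/list into
# /tmp/drafts.silasjelley.com/ instead of /tmp/silasjelley.com/, and
# SITE_BUILD_VERBOSITY controls how much progress detail is printed
# (higher is chattier; per-file output appears at 3).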