# python-webgen/webgen.py
# imports
import parser
import stitcher
import argparse
import os
from bs4 import BeautifulSoup as bs
# ---- command-line interface ----
# Two required positional paths, plus optional template/stylesheet locations,
# a site title, and verbosity switches.
argparser = argparse.ArgumentParser(
    description="A simple set of python scripts to generate HTML files from a set of markdown files."
)
argparser.add_argument("input_path", help="the location of the folder to parse")
argparser.add_argument("output_path", help="the location to output the finalised files")
argparser.add_argument(
    "--template_path",
    help="the location of the template files (default: ./templates/)",
    default="./templates/",
)
argparser.add_argument(
    "--css_file",
    help="the location of a css file to copy to the output directory (default: ./templates/stylesheet.css)",
    default="./templates/stylesheet.css",
)
argparser.add_argument(
    "--site_title",
    help='the title shown on the main page (default: "Blog")',
    default="Blog",
)
argparser.add_argument(
    "--title_all",
    help="add the main title to all articles",
    action="store_true",
    dest="allTitles",
)
argparser.add_argument("-v", help="more output", action="store_true", dest="verbose")
args = argparser.parse_args()
# ---- shared state ----
articles = []            # bare markdown filenames found directly in the input folder
root = args.input_path   # input folder; later stages build paths as `root + filename`
htmls = {}               # full article path -> generated HTML string
article_timestamps = {}  # full article path -> publication date (for sorting)
titles = []              # [title, link] pairs for the main index page

# Discover article files sitting directly inside the input folder.
# BUG FIX: the original used os.walk and so picked up files from
# subdirectories too, but every later stage opens the file as
# `root + filename` (no subdirectory component), which would crash with
# FileNotFoundError. Restrict discovery to top-level regular files.
for entry in os.listdir(args.input_path):
    if os.path.isfile(os.path.join(args.input_path, entry)):
        articles.append(entry)
print("\nDiscovering articles...\n")
for article in articles:
article = root + article
fstring = "Found {draft}{title} at {rootPath}{path}"
isDraft = "[Draft] " if parser.getMetadata(article)['draft'] else ""
print(fstring.format(title = parser.getTitle(article), rootPath = root, path = article, draft = isDraft))
print("\nGenerating HTML files from template...")
for article in articles:
article = root + article
if parser.getMetadata(article)['draft'] == True:
if args.verbose: print('"{}" is a draft, skipping...'.format(parser.getTitle(article)))
continue
if args.allTitles:
htmls[article] = stitcher.createArticle(article, args.template_path, args.site_title)
else:
htmls[article] = stitcher.createArticle(article, args.template_path)
if args.verbose:
print(htmls[article], end='\n')
print("\nWriting files to {}...\n".format(args.output_path))
for article in articles:
workingDir = args.output_path + article[:-3] + "/"
article = root + article
if parser.getMetadata(article)['draft'] == True:
if args.verbose: print('{} is a draft, skipping...'.format(article))
continue
html = bs(htmls[article]).prettify()
if not os.path.exists(workingDir):
os.makedirs(workingDir)
if args.verbose: print("creating directory {}".format(workingDir))
open(workingDir + "index.html", 'w').write(html)
if args.verbose:
print("wrote {} to {}".format(article, workingDir))
print("Files written")
print("\nSorting non-draft articles by timestamp...\n")
for article in articles:
article = root + article
if parser.getMetadata(article)['draft'] == True:
if args.verbose: print('{} is a draft, skipping...'.format(parser.getTitle(article)))
continue
article_timestamps[article] = parser.getMetadata(article)['date']
sorted_articles = sorted(article_timestamps.items(), key = lambda item: item[1], reverse = True)
if args.verbose: print(sorted_articles)
print("\nGenerating main page...")
for article in sorted_articles:
article = article[0]
title = parser.getTitle(article)
link = article.split("/")[-1][:-3]
titles.append([title, link])
if args.verbose: print("added {} to titles list with link {}".format(title, link))
mainPage = bs(stitcher.createMainPage(titles, args.template_path, args.site_title)).prettify()
print("Generated main page")
print("Writing main page to {}".format(args.output_path))
open(args.output_path + "index.html", 'w').write(mainPage)
print("Succesfully wrote main page to {}index.html".format(args.output_path))
print("\nCopying CSS file...")
if args.verbose: print(args.css_file, "-->", args.output_path + "stylesheet.css")
css = open(args.css_file, 'r').read()
open(args.output_path + "stylesheet.css", 'w').write(css)
print("Copied CSS file")
print("\nAll done!")