├── narrator
│   ├── __init__.py
│   ├── llm.py
│   ├── logger.py
│   ├── db.py
│   ├── summarizer.py
│   └── cli.py
├── requirements.txt
├── .gitignore
├── run.py
├── assets
│   └── dashboard.png
├── scripts
│   └── setup_terminal_logger.sh
├── dashboard.py
└── README.md
/narrator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | streamlit
3 | plotly
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv/
2 | __pycache__/
3 | *.pyc
4 | data/*
5 | logs/*
6 |
--------------------------------------------------------------------------------
/run.py:
--------------------------------------------------------------------------------
from narrator.cli import main

# Entry-point shim: lets the CLI be launched as `python run.py ...` from the
# repo root without installing the `narrator` package.
if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/assets/dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vianarafael/codechrono/HEAD/assets/dashboard.png
--------------------------------------------------------------------------------
/narrator/llm.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | MODEL_NAME = "qwen3:14b-q4_K_M" # Change to the model you are using
4 |
def run_llm(prompt: str) -> str:
    """Send *prompt* to the local Ollama server and return the model's reply.

    Returns the stripped ``response`` field of the JSON payload, or "" when
    the field is missing. Raises requests.HTTPError on a non-2xx status and
    requests.Timeout if the server stops responding.
    """
    response = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": MODEL_NAME, "prompt": prompt, "stream": False},
        # Local LLM generations are slow, but don't hang forever on a dead
        # server — requests has no default timeout.
        timeout=600,
    )
    # Fail loudly on HTTP errors instead of silently returning "" from a
    # JSON error payload.
    response.raise_for_status()
    return response.json().get("response", "").strip()
--------------------------------------------------------------------------------
/scripts/setup_terminal_logger.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install a shell hook that appends every command (prefixed with a Unix
# timestamp) to ~/.code_narrator/terminal.log, so the narrator CLI can
# reconstruct what was run during a session.

LOG_DIR="$HOME/.code_narrator"
mkdir -p "$LOG_DIR"

LOG_FILE="$LOG_DIR/terminal.log"
touch "$LOG_FILE"

echo "🔧 Adding PROMPT_COMMAND to your shell config..."

# Prefer the zsh config when present, otherwise fall back to bash.
# NOTE(review): PROMPT_COMMAND is a bash feature; stock zsh uses the precmd
# hook instead — confirm this actually fires in the target zsh setup.
if [ -f "$HOME/.zshrc" ]; then
    SHELL_RC="$HOME/.zshrc"
elif [ -f "$HOME/.bashrc" ]; then
    SHELL_RC="$HOME/.bashrc"
else
    echo "❌ Could not find .bashrc or .zshrc"
    exit 1
fi

# Append the hook only once so re-running this script is idempotent.
if ! grep -q "PROMPT_COMMAND.*code_narrator" "$SHELL_RC"; then
    echo -e "\n# >>> Code Narrator Logger >>>" >> "$SHELL_RC"
    # Each prompt logs: "<epoch-seconds> ::: <last history entry>"; the sed
    # strips history's leading entry number.
    echo "export PROMPT_COMMAND='echo \$(date +%s) ::: \$(history 1 | sed \"s/ *[0-9]* *//\") >> $LOG_FILE'" >> "$SHELL_RC"
    echo "# <<< Code Narrator Logger <<<" >> "$SHELL_RC"
    echo "✅ Logger added to $SHELL_RC"
else
    echo "⚠️ PROMPT_COMMAND hook already exists."
fi

echo "🟢 Done. Restart your terminal or run: source $SHELL_RC"
--------------------------------------------------------------------------------
/dashboard.py:
--------------------------------------------------------------------------------
import sqlite3
import pandas as pd
import plotly.express as px
import os
from datetime import datetime  # NOTE(review): appears unused — candidate for removal
import streamlit as st


# Streamlit dashboard visualizing finished sessions recorded by the CLI.
# Must be run from the repo root so this relative path resolves to the same
# database narrator/db.py writes.
DB_PATH = "data/sessions.db"

if not os.path.exists(DB_PATH):
    st.error("No session database found.")
else:
    # Load only completed sessions; start/end are Unix epoch seconds.
    conn = sqlite3.connect(DB_PATH)
    df = pd.read_sql_query("""
        SELECT
            message,
            start_time,
            end_time,
            duration,
            summary
        FROM sessions
        WHERE end_time IS NOT NULL
        ORDER BY start_time DESC
    """, conn)
    conn.close()

    # Derive plot-friendly columns from the raw epoch/second values.
    df['start'] = pd.to_datetime(df['start_time'], unit='s')
    df['end'] = pd.to_datetime(df['end_time'], unit='s')
    df['duration_hours'] = df['duration'] / 3600

    st.title("📊 Timecraft Dashboard")

    # Raw table of sessions, newest first (ordering comes from the query).
    st.subheader("🧠 Feature Log")
    st.dataframe(df[['start', 'message', 'duration_hours', 'summary']])

    # One bar per session, colored by its tag/message.
    st.subheader("⏱️ Time per Feature (hrs)")
    fig = px.bar(df, x='start', y='duration_hours', color='message', title='Duration per Feature')
    st.plotly_chart(fig)

    # Chronological line of session durations — a rough velocity trend.
    st.subheader("📈 Speed Trend (hrs per session)")
    fig2 = px.line(df.sort_values('start'), x='start', y='duration_hours', markers=True, title='Velocity Over Time')
    st.plotly_chart(fig2)
--------------------------------------------------------------------------------
/narrator/logger.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import time
4 | import threading
5 |
6 | LOG_DIR = "logs"
7 | TERMINAL_LOG = os.path.join(LOG_DIR, "terminal.log")
8 | WINDOW_LOG = os.path.join(LOG_DIR, "active_window.log")
9 |
10 | _window_logger_thread = None
11 | _stop_window_logger = threading.Event()
12 |
13 |
def clear_logs():
    """Reset both capture logs to empty files, creating ``logs/`` if needed."""
    os.makedirs(LOG_DIR, exist_ok=True)
    for log_path in (TERMINAL_LOG, WINDOW_LOG):
        with open(log_path, "w"):
            pass  # opening in "w" mode truncates the file
18 |
19 |
def start_window_logger():
    """Start sampling the focused window title every 5 seconds.

    Spawns a daemon thread that shells out to ``xdotool`` and appends
    "<epoch> ::: <window title>" lines to WINDOW_LOG until
    stop_window_logger() is called. Sampling failures (xdotool missing, no
    X display, ...) are deliberately swallowed so capture stays best-effort.
    """
    global _window_logger_thread

    # Guard against spawning a second sampler if start is called twice.
    if _window_logger_thread is not None and _window_logger_thread.is_alive():
        return

    def log_windows():
        while not _stop_window_logger.is_set():
            try:
                window = subprocess.check_output(
                    ["xdotool", "getactivewindow", "getwindowname"]
                ).decode().strip()
                with open(WINDOW_LOG, "a") as f:
                    f.write(f"{int(time.time())} ::: {window}\n")
            except Exception:
                pass  # best-effort: skip this sample
            # wait() instead of time.sleep() so the thread notices the stop
            # event immediately rather than up to 5 s later.
            _stop_window_logger.wait(5)

    _stop_window_logger.clear()
    _window_logger_thread = threading.Thread(target=log_windows, daemon=True)
    _window_logger_thread.start()
37 |
38 |
def stop_window_logger():
    """Signal the window-sampling thread to stop and wait briefly for it.

    The join is bounded: the poll loop may be mid-sleep (up to 5 s) or
    blocked inside xdotool, and an unbounded join would stall the CLI.
    The thread is a daemon, so a straggler cannot keep the process alive.
    """
    _stop_window_logger.set()
    if _window_logger_thread:
        _window_logger_thread.join(timeout=6)
43 |
44 |
def get_terminal_logs(start_ts, end_ts):
    """Return commands logged between start_ts and end_ts (inclusive).

    Lines are "<epoch> ::: <command>"; malformed lines are skipped.
    Returns [] when the log file does not exist.
    """
    if not os.path.exists(TERMINAL_LOG):
        return []
    commands = []
    with open(TERMINAL_LOG, "r") as f:
        for raw in f:
            fields = raw.strip().split(" ::: ", 1)
            if len(fields) != 2:
                continue
            try:
                logged_at = int(fields[0])
            except ValueError:
                continue
            if start_ts <= logged_at <= end_ts:
                commands.append(fields[1])
    return commands
59 |
60 |
def get_window_logs(start_ts, end_ts):
    """Return focused-window titles sampled between start_ts and end_ts.

    Lines are "<epoch> ::: <title>"; malformed lines are skipped.
    Returns [] when the log file does not exist.
    """
    if not os.path.exists(WINDOW_LOG):
        return []
    titles = []
    with open(WINDOW_LOG, "r") as f:
        for raw in f:
            fields = raw.strip().split(" ::: ", 1)
            if len(fields) != 2:
                continue
            try:
                sampled_at = int(fields[0])
            except ValueError:
                continue
            if start_ts <= sampled_at <= end_ts:
                titles.append(fields[1])
    return titles
75 |
76 |
def get_git_diff():
    """Return `git diff` output for the current directory.

    Falls back to the placeholder string on any failure (not a git repo,
    git missing, decode error) so callers never have to handle exceptions.
    """
    try:
        return subprocess.check_output(["git", "diff"]).decode()
    except Exception:
        return "No diff available"
83 |
--------------------------------------------------------------------------------
/narrator/db.py:
--------------------------------------------------------------------------------
import sqlite3
import os

# Session database path, relative to the current working directory — the CLI
# must be run from the repo root for all modules to hit the same file.
DB_PATH = "data/sessions.db"
# Import-time side effect: ensure the data directory exists before any
# connection is opened.
os.makedirs("data", exist_ok=True)
6 |
7 |
def init_db():
    """Create the ``sessions`` table if it does not exist yet."""
    with sqlite3.connect(DB_PATH) as conn:
        # connection.execute opens an implicit cursor; no explicit one needed.
        conn.execute("""
            CREATE TABLE IF NOT EXISTS sessions (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                message TEXT,
                start_time INTEGER,
                end_time INTEGER,
                duration INTEGER,
                summary TEXT
            )
        """)
        conn.commit()
22 |
23 |
24 |
def create_session(message, start_ts):
    """Insert a new, not-yet-finished session row.

    end_time and summary start as NULL; finalize_session() fills them in
    when the session is stopped.
    """
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            "INSERT INTO sessions (message, start_time, end_time, summary) VALUES (?, ?, ?, ?)",
            (message, start_ts, None, None),
        )
        conn.commit()
33 |
34 |
def finalize_session(start_ts, end_ts, summary):
    """Close out the session identified by start_ts.

    Records the end timestamp, the derived duration in seconds, and the
    LLM-generated summary.
    """
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            """
            UPDATE sessions
            SET end_time = ?, duration = ?, summary = ?
            WHERE start_time = ?
            """,
            (end_ts, end_ts - start_ts, summary, start_ts),
        )
        conn.commit()
45 |
def get_recent_summaries(limit=5):
    """Return the newest summarized sessions, newest first.

    Each row is a dict with keys: message, start, end, summary.
    """
    with sqlite3.connect(DB_PATH) as conn:
        conn.row_factory = sqlite3.Row
        rows = conn.execute(
            """
            SELECT message, start_time as start, end_time as end, summary
            FROM sessions
            WHERE summary IS NOT NULL
            ORDER BY start_time DESC
            LIMIT ?
            """,
            (limit,),
        ).fetchall()
    return [dict(row) for row in rows]
58 |
def get_last_unfinished_session():
    """Return the start_time of the most recent open session, or None.

    A session is "open" while its end_time is still NULL.
    """
    with sqlite3.connect(DB_PATH) as conn:
        row = conn.execute(
            """
            SELECT start_time FROM sessions
            WHERE end_time IS NULL
            ORDER BY start_time DESC
            LIMIT 1
            """
        ).fetchone()
    return row[0] if row else None
70 |
def get_estimation_data():
    """Return all completed, summarized sessions for estimation prompts.

    Each row is a dict with keys: message, duration (seconds), summary.
    """
    with sqlite3.connect(DB_PATH) as conn:
        conn.row_factory = sqlite3.Row
        rows = conn.execute(
            """
            SELECT message, (end_time - start_time) as duration, summary
            FROM sessions
            WHERE summary IS NOT NULL AND end_time IS NOT NULL
            """
        ).fetchall()
    return [dict(row) for row in rows]
81 |
82 |
83 |
--------------------------------------------------------------------------------
/narrator/summarizer.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 | from narrator.llm import MODEL_NAME
4 |
5 | NAME = "Rafael"
6 |
7 | OLLAMA_URL = "http://localhost:11434/api/generate"
8 |
9 |
def build_prompt(commands, git_diff, window_log):
    """Compose the session-summary prompt from the captured artifacts.

    Every artifact section is optional and only included when non-empty.
    Inputs are truncated (50 commands / 3000 chars of diff / 50 window
    samples) to keep the prompt within the model's context window.
    """
    sections = ["You are an AI dev assistant. Summarize the developer's coding session.\n\n"]

    if commands:
        sections.append("## Commands Run:\n" + "\n".join(commands[:50]) + "\n\n")

    if git_diff:
        # Cap the diff so one big refactor doesn't crowd out everything else.
        sections.append("## Git Changes:\n" + git_diff[:3000] + "\n\n")

    if window_log:
        sections.append("## App Usage Timeline:\n" + "\n".join(window_log[:50]) + "\n\n")

    sections.append(
        "### Summary Task:\n"
        "Summarize what was worked on: features, bugs, deployments, refactors.\n"
        "Use clear bullet points, but no markup. Be concise but informative."
    )

    return "".join(sections)
29 |
30 |
def summarize_session(commands, git_diff, window_log):
    """Ask the local LLM to summarize a session; never raises.

    Returns the model's summary with any <think>...</think> reasoning block
    removed, or an error string if the HTTP request or response fails.
    """
    prompt = build_prompt(commands, git_diff, window_log)
    try:
        response = requests.post(OLLAMA_URL, json={
            "model": MODEL_NAME,
            "prompt": prompt,
            "stream": False
        })
        response.raise_for_status()
        summary = response.json()["response"]
        # Qwen-style models emit a <think>...</think> reasoning block before
        # the answer; strip it. (The previous pattern r".*?" only matched
        # empty strings, so nothing was ever removed; it also double-assigned
        # `summary = summary = ...`.)
        summary = re.sub(r"<think>.*?</think>", "", summary, flags=re.DOTALL).strip()

        return summary
    except Exception as e:
        return f"⚠️ Error during summarization: {e}"
46 |
def estimate_time(task_description, past_sessions):
    """Estimate how long *task_description* will take, based on history.

    Builds a prompt from up to 10 past sessions (feature, duration in hours,
    summary), asks the local LLM, and returns the cleaned response. Never
    raises; failures come back as an error string.
    """
    prompt = f"You are an AI assistant that estimates how long it will take {NAME} to complete a task.\n\n"
    prompt += "Here are some of his past dev sessions:\n"

    for s in past_sessions[:10]:  # limit for now
        hours = round(s["duration"] / 3600, 2)
        prompt += f"### Session ({hours}h)\n- Feature: {s['message']}\n- Summary: {s['summary']}\n\n"

    # State the task exactly once. (A previous revision appended the task and
    # a "How long will this take?" question, then appended this block too,
    # duplicating the task in the prompt.)
    prompt += f"""
Task: {task_description}

Please respond with:
1. A single estimated time range (e.g. '3–5 hours')
2. A bullet-point breakdown (feature → time)
3. A short note on factors that may affect the estimate
Don't use markdown. Be concise.
"""
    try:
        response = requests.post(OLLAMA_URL, json={
            "model": MODEL_NAME,
            "prompt": prompt,
            "stream": False
        })
        response.raise_for_status()
        estimate = response.json()["response"]
        # Strip Qwen-style <think>...</think> reasoning. (The previous
        # pattern r".*?" matched only empty strings — a no-op.)
        estimate = re.sub(r"<think>.*?</think>", "", estimate, flags=re.DOTALL).strip()
        return estimate
    except Exception as e:
        return f"⚠️ Error estimating time: {e}"
76 |
77 |
--------------------------------------------------------------------------------
/narrator/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import time
3 | from narrator import logger, db, summarizer, llm
4 | import re
5 |
# In-process marker of the running session's start time. A separate `stop`
# invocation runs in a new process and won't see this, which is why
# stop_session reads the active session's start time from the DB instead.
SESSION_STATE = {"start_time": None}
7 |
8 |
def start_session(args):
    """Begin a tracked coding session tagged with ``args.message``."""
    print(f"🔵 Starting session: {args.message}")
    started_at = int(time.time())
    SESSION_STATE["start_time"] = started_at

    # Wipe logs so this session's capture isn't polluted by the previous one.
    logger.clear_logs()
    logger.start_window_logger()

    # Ensure the table exists, then record the open session row.
    db.init_db()
    db.create_session(args.message, started_at)

    print("⏳ Session running. Use 'stop' to end.")
20 |
21 |
def stop_session(args):
    """End the active session: gather logs, summarize via LLM, and persist.

    The session's start time is read from the DB rather than SESSION_STATE:
    `stop` typically runs in a different process than `start`, so in-memory
    state is gone. (The old code read SESSION_STATE into start_time and then
    immediately overwrote it — dead code, now removed.)
    """
    end_time = int(time.time())

    start_time = db.get_last_unfinished_session()
    if not start_time:
        print("⚠️ No active session found in DB.")
        return

    print("🛑 Stopping session...")
    logger.stop_window_logger()

    commands = logger.get_terminal_logs(start_time, end_time)
    windows = logger.get_window_logs(start_time, end_time)
    git_diff = logger.get_git_diff()

    summary = summarizer.summarize_session(commands, git_diff, windows)
    db.finalize_session(start_time, end_time, summary)

    print("✅ Summary:")
    print(summary)
43 |
def estimate_task(args):
    """CLI handler: print an LLM time estimate for ``args.message``."""
    print(f"📐 Estimating time for: {args.message}")
    history = db.get_estimation_data()
    estimate = summarizer.estimate_time(args.message, history)
    print(f"\n🧮 Estimated Time: {estimate}\n")
49 |
50 |
def report_sessions(args):
    """Print the most recent summarized sessions in a human-readable form."""
    print("📄 Recent Sessions:")
    sessions = db.get_recent_summaries()
    for s in sessions:
        start = time.ctime(s["start"])
        end = time.ctime(s["end"])
        message = s.get("message", "(no tag)")
        duration = round((s["end"] - s["start"]) / 60, 1)  # in minutes

        summary = s.get("summary", "").strip()
        # Drop any <think>...</think> reasoning block left in stored
        # summaries. (The previous pattern r".*?" matched only empty strings,
        # so the stored reasoning was never removed.)
        summary = re.sub(r"<think>.*?</think>", "", summary, flags=re.DOTALL).strip()

        print(f"\n🕒 {start} — {end} ⏱ {duration} min 🏷 {message}\n{summary}\n")
64 |
def query_analysis(query_prompt: str, session_data: list) -> str:
    """Answer a free-form question about past sessions via the local LLM.

    session_data rows need message/duration/summary keys (see
    db.get_estimation_data). Returns plain text with markdown punctuation
    stripped, or a notice when there is no history yet.
    """
    if not session_data:
        return "No past session data available."

    formatted_sessions = []
    for session in session_data:
        message = session.get("message", "")
        duration = round(session.get("duration", 0) / 3600, 2)  # seconds → hours
        summary = session.get("summary", "")
        formatted_sessions.append(f"- {message} | {duration} hours\n {summary}")

    prompt = f"""
Here's a list of my dev sessions with task name, time spent, and summaries:

{chr(10).join(formatted_sessions)}

Based on the above, please answer the following:
{query_prompt}
"""

    raw_response = llm.run_llm(prompt)
    # Remove the <think>...</think> reasoning block first (the previous
    # pattern r".*?" matched only empty strings — a no-op), then strip stray
    # markdown characters for clean terminal output.
    cleaned = re.sub(r"<think>.*?</think>", "", raw_response, flags=re.DOTALL)
    cleaned = re.sub(r"[#*`>_]", "", cleaned)  # strip markdown chars
    return cleaned.strip()
89 |
def query_sessions(args):
    """CLI handler: run a free-form analysis query over session history."""
    print(f"🧠 Analyzing session history with query: {args.message}")
    history = db.get_estimation_data()
    answer = query_analysis(args.message, history)
    print(f"\n🤖 {answer}\n")
95 |
96 |
97 |
def main():
    """Build the argument parser and dispatch to the chosen subcommand."""
    parser = argparse.ArgumentParser(prog="code_narrator", description="Track and summarize dev sessions.")
    subparsers = parser.add_subparsers(dest="command")

    # (name, help text, handler, -m/--message help or None when no -m flag)
    command_specs = [
        ("start", "Start a coding session", start_session, "Tag or description of the session"),
        ("stop", "Stop the current session", stop_session, None),
        ("report", "Show past session summaries", report_sessions, None),
        ("estimate", "Estimate time for a new task", estimate_task, "Task description to estimate"),
        ("query", "Ask high-level questions about your session history", query_sessions, "Query prompt for the LLM"),
    ]
    for name, help_text, handler, message_help in command_specs:
        sub = subparsers.add_parser(name, help=help_text)
        if message_help is not None:
            sub.add_argument("-m", "--message", type=str, required=True, help=message_help)
        sub.set_defaults(func=handler)

    args = parser.parse_args()

    # No subcommand given → argparse sets no `func`; show usage instead.
    if hasattr(args, "func"):
        args.func(args)
    else:
        parser.print_help()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# CodeChrono

CodeChrono helps you estimate how long software tasks will take — based on how long you actually take to build them.
2 |
3 | A local LLM-powered dev session logger that watches your terminal, code changes, and app focus — then summarizes what you worked on.
4 |
5 | Built to help you estimate how long real work takes—based on your own history, not guesswork.
6 |
7 | ---
8 |
9 | ## 🚀 Features
10 |
11 | - ✅ Tracks terminal commands and app focus
12 | - ✅ Summarizes git diffs
13 | - ✅ Uses a local LLM (via [Ollama](https://ollama.com)) to generate summaries
14 | - ✅ Stores everything locally in SQLite
15 | - ✅ **Estimates time to complete new tasks based on your real history**
16 | - ✅ Fully offline, no tracking
17 |
18 | ---
19 | ## 📊 Your Dev Work — Visualized
20 |
21 | See what you worked on, how long it took, and whether you’re speeding up — all in a clean local dashboard.
22 |
23 | 
24 |
25 | ---
26 |
27 | ## 📦 Requirements
28 |
29 | - Python 3.8+
30 | - [Ollama](https://ollama.com) installed and running a model (e.g. `qwen3`)
31 | - Shell that supports `PROMPT_COMMAND` (bash, zsh)
32 | - App focus tracking (optional):
33 | - `xdotool` for Linux
34 | - `osascript` for macOS (basic support via AppleScript)
35 | - [NirCmd](https://www.nirsoft.net/utils/nircmd.html) or custom PowerShell for Windows
36 |
37 |
38 | ---
39 |
40 | ## 📥 Setup
41 |
42 | 1. **Clone this repo**
43 |
44 | 2. **Install Python dependencies**
45 | ```bash
46 | pip install -r requirements.txt
47 | ```
48 |
49 | 3. **Set up terminal logging and run ollama**
50 | ```bash
51 | bash scripts/setup_terminal_logger.sh
52 | source ~/.bashrc # or source ~/.zshrc
53 |
ollama run qwen3:14b-q4_K_M  # using another model? change MODEL_NAME in narrator/llm.py — CodeChrono strips Qwen's `<think>...</think>` reasoning blocks automatically
55 | ```
56 |
57 | ---
58 |
59 | ## 🛠 Usage
60 |
61 | ```bash
62 | python run.py start -m "refactor login flow"
63 |
64 | python run.py stop
65 |
66 | # view recent summaries
67 | python run.py report
68 | ```
69 |
70 | **🧪 Example Output**
71 | ```bash
72 | ## Summary (2h session)
73 | - Fixed bug in `auth.py` handling token expiration
74 | - Ran tests and confirmed fix
75 | - Researched error via Stack Overflow
76 | ```
77 |
78 | ---
79 |
80 | ## 🖥 Launch the Dashboard
81 |
82 | To visualize your session history and track your development speed over time, run:
83 |
84 | ```bash
85 | streamlit run dashboard.py
86 | ```
87 | Once it starts, open [http://localhost:8501](http://localhost:8501) in your browser.
88 |
89 | The dashboard shows:
90 |
91 | - ⏱️ Time spent per session
92 | - ⚡ Your fastest vs slowest tasks
93 | - 📉 Whether you're getting faster or slower
94 | - 🧱 A breakdown of features you've built
95 |
96 | > 💡 **Tip:** Make sure you’ve logged at least one session before launching the dashboard.
97 |
98 |
99 |
100 | ## 🔮 Estimate time for a new feature
101 |
102 | ```bash
103 | python run.py estimate -m "build settings page for admin panel"
104 | ```
105 |
106 | **🧪 Example Output**
107 | ```bash
108 | 🧮 Estimated Time: 2–3 hours.
109 | This task is similar to your previous “settings UI” session (3h), but may go faster based on recency.
110 | ```
111 |
112 | ---
113 |
114 | ## 🏎 How CodeChrono Makes You Faster
115 |
116 | CodeChrono doesn’t just track what you did — it helps you build speed through self-awareness.
117 |
118 | ✅ **Know Your Real Benchmarks**
119 | Stop guessing how long something “should” take. See how long you actually took — and plan accordingly.
120 |
121 | 🔁 **Catch Time Sinks**
122 | Spot patterns in what slows you down (auth flows? test setups?) so you can simplify or automate them next time.
123 |
124 | 🧠 **Improve Through Feedback**
125 | Use summaries + durations as a personal feedback loop. Reflect, adjust, and optimize how you work.
126 |
127 | 🔮 **Estimate With Confidence**
128 | Replace hesitation with history-backed estimates. No more overbooking or under-planning your dev time.
129 |
130 | ---
131 |
132 |
133 | ## 🤖 Ask the LLM Questions
134 |
135 | CodeChrono isn’t just for tracking — it can answer smart questions based on your past dev sessions.
136 |
137 | You're already storing:
138 | - Timestamps
139 | - Descriptions
140 | - Summaries
141 | - Durations
142 |
143 | So you can build a new CLI command like:
144 | ```bash
145 | python run.py query -m "What features took the longest in the last month?"
146 | ```
147 |
148 | Behind the scenes, this:
149 | 1. Pulls relevant session summaries from SQLite
150 | 2. Sends them to the LLM with a prompt like:
151 |
152 | ```text
153 | Here's a list of my dev sessions from the past month. Please analyze:
154 | - Which tasks took the longest?
155 | - Are there any patterns or inefficiencies?
156 | - What types of work am I fastest at?
157 |
158 | Give recommendations if possible.
159 | ```
160 |
161 | This turns CodeChrono into a **local dev analyst** — not just a logger.
162 |
163 |
164 | ⚙️ Model Compatibility
165 | CodeChrono is designed and tested with the qwen3:14b-q4_K_M model.
166 |
Qwen models often include reasoning blocks like `<think>...</think>`, which CodeChrono strips automatically.
168 | If you're using a different model (e.g. mistral, llama2, etc.), those tags may not appear — or the output format may change entirely.
169 |
170 | To adjust for this, you can update your llm.py like so:
171 |
172 | ```python
if "qwen" in MODEL_NAME:
    response = re.sub(r"<think>.*?</think>", "", response, flags=re.DOTALL)
175 | ```
176 |
177 |
178 | ## ⚡️ Make It Frictionless
179 |
180 | If you're like me, you don't want to remember commands or activate virtual environments every time you build. Here's how to make CodeChrono always ready:
181 |
182 | Add these to your `.bashrc` or `.zshrc`:
183 | ```bash
alias tcstart='python ~/codechrono/run.py start -m'
alias tcstop='python ~/codechrono/run.py stop'
alias tcreport='python ~/codechrono/run.py report'
alias tcest='python ~/codechrono/run.py estimate -m'
188 | ```
189 |
190 | Then just type:
191 | ```bash
192 | tcstart "build auth"
193 | ```
194 |
195 | ---
--------------------------------------------------------------------------------