|
|
|
|
@@ -23,8 +23,8 @@ QUEUE_DAEMON_PID_FILE="${QUEUE_DAEMON_PID_FILE:-$QUEUE_DIR/daemon.pid}"
|
|
|
|
|
QUEUE_DAEMON_LOCK_FILE="${QUEUE_DAEMON_LOCK_FILE:-$QUEUE_DIR/daemon.lock}"
|
|
|
|
|
QUEUE_DAEMON_LOG_FILE="${QUEUE_DAEMON_LOG_FILE:-$QUEUE_DIR/daemon.log}"
|
|
|
|
|
QUEUE_DAEMON_INTERVAL_MINUTES="${QUEUE_DAEMON_INTERVAL_MINUTES:-5}"
|
|
|
|
|
QUEUE_DAEMON_BATCH_SIZE="${QUEUE_DAEMON_BATCH_SIZE:-2}"
|
|
|
|
|
QUEUE_CLEANUP_AGE_DAYS="${QUEUE_CLEANUP_AGE_DAYS:-7}"
|
|
|
|
|
TASK_TIMEOUT_HOURS="${TASK_TIMEOUT_HOURS:-1}"
|
|
|
|
|
|
|
|
|
|
# Load user config overrides (~/.kugetsu/config)
|
|
|
|
|
if [ -f "$KUGETSU_DIR/config" ]; then
|
|
|
|
|
@@ -62,7 +62,7 @@ count_active_dev_sessions() {
|
|
|
|
|
for session_file in "$SESSIONS_DIR"/*.json; do
|
|
|
|
|
if [ -f "$session_file" ]; then
|
|
|
|
|
local filename=$(basename "$session_file")
|
|
|
|
|
if [ "$filename" != "base.json" ]; then
|
|
|
|
|
if [ "$filename" != "base.json" ] && [ "$filename" != "pm-agent.json" ]; then
|
|
|
|
|
count=$((count + 1))
|
|
|
|
|
fi
|
|
|
|
|
fi
|
|
|
|
|
@@ -531,6 +531,8 @@ with open("$QUEUE_ITEMS_DIR/${queue_id}.json", "w") as f:
|
|
|
|
|
|
|
|
|
|
print(f"Enqueued: $queue_id")
|
|
|
|
|
PYEOF
|
|
|
|
|
|
|
|
|
|
kugetsu_add_notification "task_queued" "Task queued: $issue_ref" "$issue_ref"
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
get_pending_tasks() {
|
|
|
|
|
@@ -576,6 +578,8 @@ get_queue_stats() {
|
|
|
|
|
update_queue_item_state() {
|
|
|
|
|
local queue_id="$1"
|
|
|
|
|
local new_state="$2"
|
|
|
|
|
local session_id="${3:-}"
|
|
|
|
|
local pid="${4:-}"
|
|
|
|
|
|
|
|
|
|
local item_file="$QUEUE_ITEMS_DIR/${queue_id}.json"
|
|
|
|
|
if [ ! -f "$item_file" ]; then
|
|
|
|
|
@@ -585,22 +589,33 @@ update_queue_item_state() {
|
|
|
|
|
|
|
|
|
|
python3 << PYEOF
|
|
|
|
|
import json
|
|
|
|
|
import os
|
|
|
|
|
from datetime import datetime
|
|
|
|
|
|
|
|
|
|
item_file = "$item_file"
|
|
|
|
|
new_state = "$new_state"
|
|
|
|
|
session_id = "$session_id"
|
|
|
|
|
pid = "$pid"
|
|
|
|
|
|
|
|
|
|
with open(item_file, 'r') as f:
|
|
|
|
|
item = json.load(f)
|
|
|
|
|
|
|
|
|
|
issue_ref = item.get('issue_ref', '')
|
|
|
|
|
|
|
|
|
|
item['state'] = new_state
|
|
|
|
|
|
|
|
|
|
if new_state == "notified":
|
|
|
|
|
item['notified_at'] = datetime.now().isoformat() + "Z"
|
|
|
|
|
if session_id:
|
|
|
|
|
item['opencode_session_id'] = session_id
|
|
|
|
|
if pid:
|
|
|
|
|
item['pid'] = int(pid) if pid.isdigit() else None
|
|
|
|
|
elif new_state == "completed":
|
|
|
|
|
item['completed_at'] = datetime.now().isoformat() + "Z"
|
|
|
|
|
os.system(f"kugetsu_add_notification 'task_completed' 'Task completed: {issue_ref}' '{issue_ref}'")
|
|
|
|
|
elif new_state == "error":
|
|
|
|
|
item['error'] = datetime.now().isoformat() + "Z"
|
|
|
|
|
os.system(f"kugetsu_add_notification 'task_error' 'Task error: {issue_ref}' '{issue_ref}'")
|
|
|
|
|
|
|
|
|
|
with open(item_file, 'w') as f:
|
|
|
|
|
json.dump(item, f, indent=2)
|
|
|
|
|
@@ -609,6 +624,83 @@ print(f"Updated $queue_id to state: $new_state")
|
|
|
|
|
PYEOF
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
check_task_timeouts() {
    # Scan queue items in the "notified" state and force-fail any task that has
    # exceeded TASK_TIMEOUT_HOURS (default 1h): kill its recorded worker pid,
    # kill any opencode process attached to its worktree, flip the queue item
    # to "error", and mark the matching session file as "timeout".
    if [ ! -d "$QUEUE_ITEMS_DIR" ]; then
        return
    fi

    local timeout_hours="${TASK_TIMEOUT_HOURS:-1}"

    for item in "$QUEUE_ITEMS_DIR"/*.json; do
        [ -f "$item" ] || continue

        local state=$(python3 -c "import json; print(json.load(open('$item')).get('state', ''))" 2>/dev/null)
        if [ "$state" != "notified" ]; then
            continue
        fi

        local notified_at=$(python3 -c "import json; print(json.load(open('$item')).get('notified_at', ''))" 2>/dev/null)
        if [ -z "$notified_at" ]; then
            continue
        fi

        local queue_id=$(basename "$item" .json)
        local pid=$(python3 -c "import json; print(json.load(open('$item')).get('pid', ''))" 2>/dev/null)
        local session_id=$(python3 -c "import json; print(json.load(open('$item')).get('opencode_session_id', ''))" 2>/dev/null)

        # A JSON null pid is printed by python as the literal string "None",
        # which would pass the [ -n "$pid" ] guard below; normalize it to empty.
        [ "$pid" = "None" ] && pid=""

        # BUGFIX: `date -d` is GNU-specific and can fail (BSD/macOS date, or a
        # malformed timestamp). The previous fallback of "0" set notified_epoch
        # to the Unix epoch, making every notified task look ~470000h old and
        # killing it immediately. On parse failure, skip the item instead.
        local notified_epoch=$(date -d "$notified_at" +%s 2>/dev/null || echo "")
        if [ -z "$notified_epoch" ]; then
            echo "Warning: cannot parse notified_at '$notified_at' for $queue_id; skipping timeout check"
            continue
        fi
        local now_epoch=$(date +%s)
        local hours_elapsed=$(( (now_epoch - notified_epoch) / 3600 ))

        if [ "$hours_elapsed" -ge "$timeout_hours" ]; then
            echo "Task $queue_id timed out after ${hours_elapsed}h (limit: ${timeout_hours}h)"

            # Kill the recorded worker process if it is still alive
            # (kill -0 only probes for existence; it sends no signal).
            if [ -n "$pid" ] && kill -0 "$pid" 2>/dev/null; then
                echo "Killing process $pid"
                kill "$pid" 2>/dev/null || true
            fi

            # Also terminate any opencode process bound to this task's worktree:
            # resolve the worktree_path from the session file whose
            # opencode_session_id matches, then pkill by full command line.
            if [ -n "$session_id" ]; then
                local worktree_path=""
                for session_file in "$SESSIONS_DIR"/*.json; do
                    [ -f "$session_file" ] || continue
                    local sess_id=$(python3 -c "import json; print(json.load(open('$session_file')).get('opencode_session_id', ''))" 2>/dev/null)
                    if [ "$sess_id" = "$session_id" ]; then
                        worktree_path=$(python3 -c "import json; print(json.load(open('$session_file')).get('worktree_path', ''))" 2>/dev/null)
                        break
                    fi
                done

                if [ -n "$worktree_path" ]; then
                    pkill -f "opencode.*$worktree_path" 2>/dev/null || true
                fi
            fi

            update_queue_item_state "$queue_id" "error"

            # Mirror the timeout into the session record so status listings agree
            # with the queue item's error state.
            local issue_ref=$(python3 -c "import json; print(json.load(open('$item')).get('issue_ref', ''))" 2>/dev/null)
            if [ -n "$issue_ref" ]; then
                local session_file=$(get_session_for_issue "$issue_ref")
                if [ -n "$session_file" ] && [ "$session_file" != "null" ]; then
                    python3 << PYEOF
import json
session_path = "$SESSIONS_DIR/$session_file"
try:
    with open(session_path, 'r') as f:
        session = json.load(f)
    session['state'] = 'timeout'
    with open(session_path, 'w') as f:
        json.dump(session, f, indent=2)
    print(f"Marked session for $issue_ref as timeout")
except Exception as e:
    print(f"Error marking session: {e}")
PYEOF
                fi
            fi
        fi
    done
}
|
|
|
|
|
|
|
|
|
|
cleanup_old_queue_items() {
|
|
|
|
|
local days="${QUEUE_CLEANUP_AGE_DAYS:-7}"
|
|
|
|
|
|
|
|
|
|
@@ -1053,6 +1145,11 @@ parse_issue_ref_from_message() {
|
|
|
|
|
owner=$(echo "$full_path" | cut -d'/' -f2)
|
|
|
|
|
repo=$(echo "$full_path" | cut -d'/' -f3)
|
|
|
|
|
issue_number=$(echo "$full_path" | grep -oE '[0-9]+$' | head -1)
|
|
|
|
|
elif echo "$message" | grep -qE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+#[0-9]+'; then
|
|
|
|
|
gitserver=$(echo "$message" | grep -oE '[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+' | head -1)
|
|
|
|
|
owner=$(echo "$gitserver" | cut -d'/' -f2)
|
|
|
|
|
repo=$(echo "$gitserver" | cut -d'/' -f3)
|
|
|
|
|
issue_number=$(echo "$message" | grep -oE '#[0-9]+' | grep -oE '[0-9]+' | head -1)
|
|
|
|
|
elif echo "$message" | grep -qE '[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+#([0-9]+)'; then
|
|
|
|
|
owner=$(echo "$message" | grep -oE '[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+#' | sed 's/#$//' | cut -d'/' -f1)
|
|
|
|
|
repo=$(echo "$message" | grep -oE '[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+#' | sed 's/#$//' | cut -d'/' -f2)
|
|
|
|
|
@@ -1267,6 +1364,7 @@ queue_daemon_loop() {
|
|
|
|
|
exit 0
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
check_task_timeouts
|
|
|
|
|
process_queue
|
|
|
|
|
done
|
|
|
|
|
}
|
|
|
|
|
@@ -1279,22 +1377,15 @@ process_queue() {
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local available_slots=$((MAX_CONCURRENT_AGENTS - active_count))
|
|
|
|
|
local batch_size=$QUEUE_DAEMON_BATCH_SIZE
|
|
|
|
|
[ "$batch_size" -gt "$available_slots" ] && batch_size=$available_slots
|
|
|
|
|
|
|
|
|
|
if [ "$batch_size" -le 0 ]; then
|
|
|
|
|
return
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local pm_session=$(get_pm_agent_session_id)
|
|
|
|
|
if [ -z "$pm_session" ] || [ "$pm_session" = "null" ]; then
|
|
|
|
|
if [ "$available_slots" -le 0 ]; then
|
|
|
|
|
return
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local count=0
|
|
|
|
|
for item in $(ls -t "$QUEUE_ITEMS_DIR"/*.json 2>/dev/null | head -20); do
|
|
|
|
|
[ $count -ge "$available_slots" ] && break
|
|
|
|
|
[ -f "$item" ] || continue
|
|
|
|
|
[ $count -ge "$batch_size" ] && break
|
|
|
|
|
|
|
|
|
|
local state=$(python3 -c "import json; print(json.load(open('$item')).get('state', ''))" 2>/dev/null)
|
|
|
|
|
if [ "$state" != "pending" ]; then
|
|
|
|
|
@@ -1302,30 +1393,49 @@ process_queue() {
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local queue_id=$(basename "$item" .json)
|
|
|
|
|
local issue_ref=$(python3 -c "import json; print(json.load(open('$item')).get('issue_ref', '')" 2>/dev/null)
|
|
|
|
|
local message=$(python3 -c "import json; print(json.load(open('$item')).get('message', '')" 2>/dev/null)
|
|
|
|
|
local issue_ref=$(python3 -c "import json; print(json.load(open('$item')).get('issue_ref', ''))" 2>/dev/null)
|
|
|
|
|
local message=$(python3 -c "import json; print(json.load(open('$item')).get('message', ''))" 2>/dev/null)
|
|
|
|
|
|
|
|
|
|
if [ -z "$issue_ref" ] || [ -z "$message" ]; then
|
|
|
|
|
continue
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
update_queue_item_state "$queue_id" "notified"
|
|
|
|
|
kugetsu_add_notification "task_dequeued" "Task dequeued: $issue_ref" "$issue_ref"
|
|
|
|
|
|
|
|
|
|
local log_file="$LOGS_DIR/delegate-${queue_id}.log"
|
|
|
|
|
mkdir -p "$LOGS_DIR"
|
|
|
|
|
|
|
|
|
|
local env_sh="set -a; "
|
|
|
|
|
if [ -f "$ENV_DIR/pm-agent.env" ]; then
|
|
|
|
|
env_sh="${env_sh}source '$ENV_DIR/pm-agent.env'; "
|
|
|
|
|
elif [ -f "$ENV_DIR/default.env" ]; then
|
|
|
|
|
env_sh="${env_sh}source '$ENV_DIR/default.env'; "
|
|
|
|
|
local max_retries=3
|
|
|
|
|
local attempt=1
|
|
|
|
|
local success=false
|
|
|
|
|
local fork_pid=""
|
|
|
|
|
|
|
|
|
|
while [ $attempt -le $max_retries ]; do
|
|
|
|
|
if kugetsu start "$issue_ref" "$message" >> "$log_file" 2>&1; then
|
|
|
|
|
success=true
|
|
|
|
|
break
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
echo "Attempt $attempt failed for $queue_id, cleaning up..." >> "$log_file"
|
|
|
|
|
|
|
|
|
|
local session_file="$(issue_ref_to_filename "$issue_ref").json"
|
|
|
|
|
local worktree_path=$(issue_ref_to_worktree_path "$issue_ref" "$PWD")
|
|
|
|
|
|
|
|
|
|
[ -f "$SESSIONS_DIR/$session_file" ] && rm -f "$SESSIONS_DIR/$session_file"
|
|
|
|
|
worktree_exists "$issue_ref" "$PWD" && remove_worktree_for_issue "$issue_ref" "$PWD"
|
|
|
|
|
remove_issue_from_index "$issue_ref" 2>/dev/null || true
|
|
|
|
|
|
|
|
|
|
attempt=$((attempt + 1))
|
|
|
|
|
done
|
|
|
|
|
|
|
|
|
|
if [ "$success" = true ]; then
|
|
|
|
|
echo "Started task $queue_id: $issue_ref"
|
|
|
|
|
count=$((count + 1))
|
|
|
|
|
else
|
|
|
|
|
echo "Failed to start task $queue_id after $max_retries attempts"
|
|
|
|
|
update_queue_item_state "$queue_id" "pending"
|
|
|
|
|
fi
|
|
|
|
|
env_sh="${env_sh}set +a; "
|
|
|
|
|
|
|
|
|
|
nohup sh -c "${env_sh}opencode run 'Delegate task: ${message}' --continue --session '$pm_session'" >> "$log_file" 2>&1 &
|
|
|
|
|
|
|
|
|
|
echo "Queued task $queue_id for PM agent"
|
|
|
|
|
count=$((count + 1))
|
|
|
|
|
done
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
@@ -1971,20 +2081,10 @@ cmd_start() {
|
|
|
|
|
create_worktree "$issue_ref" "$parent_dir"
|
|
|
|
|
|
|
|
|
|
local session_file="$(issue_ref_to_filename "$issue_ref").json"
|
|
|
|
|
|
|
|
|
|
echo "Forking session for '$issue_ref'..."
|
|
|
|
|
|
|
|
|
|
# Session-counting: count actual dev sessions, reject if at limit
|
|
|
|
|
local active_count=$(count_active_dev_sessions)
|
|
|
|
|
if [ "$active_count" -ge "$MAX_CONCURRENT_AGENTS" ]; then
|
|
|
|
|
echo "Error: Max concurrent agents ($MAX_CONCURRENT_AGENTS) reached" >&2
|
|
|
|
|
echo "Active sessions: $active_count" >&2
|
|
|
|
|
remove_worktree_for_issue "$issue_ref" "$parent_dir"
|
|
|
|
|
exit 1
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local fork_log="$SESSIONS_DIR/$session_file.fork.log"
|
|
|
|
|
local opencode_db="${OPENCODE_DB:-$HOME/.local/share/opencode/opencode.db}"
|
|
|
|
|
local lock_file="$KUGETSU_DIR/.session_lock"
|
|
|
|
|
local lock_fd=200
|
|
|
|
|
|
|
|
|
|
> "$fork_log"
|
|
|
|
|
|
|
|
|
|
@@ -1997,25 +2097,38 @@ ${previous_context}
|
|
|
|
|
## YOUR TASK
|
|
|
|
|
$message"
|
|
|
|
|
|
|
|
|
|
fix_session_permissions
|
|
|
|
|
|
|
|
|
|
if [ "$DEBUG_MODE" = true ]; then
|
|
|
|
|
(cd "$worktree_path" && opencode run "$full_message" --fork --session "$base_session_id" --dir "$worktree_path" 2>&1) | tee "$fork_log" &
|
|
|
|
|
else
|
|
|
|
|
(cd "$worktree_path" && opencode run "$full_message" --fork --session "$base_session_id" --dir "$worktree_path" 2>&1) >> "$fork_log" &
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local fork_pid=$!
|
|
|
|
|
|
|
|
|
|
local max_attempts=10
|
|
|
|
|
local attempt=1
|
|
|
|
|
local new_session_id=""
|
|
|
|
|
local fork_log_output=""
|
|
|
|
|
|
|
|
|
|
while [ $attempt -le $max_attempts ]; do
|
|
|
|
|
sleep 1
|
|
|
|
|
(
|
|
|
|
|
flock -x $lock_fd
|
|
|
|
|
|
|
|
|
|
new_session_id=$(python3 -c "
|
|
|
|
|
local active_count=$(count_active_dev_sessions)
|
|
|
|
|
if [ "$active_count" -ge "$MAX_CONCURRENT_AGENTS" ]; then
|
|
|
|
|
echo "Error: Max concurrent agents ($MAX_CONCURRENT_AGENTS) reached" >&2
|
|
|
|
|
echo "Active sessions: $active_count" >&2
|
|
|
|
|
remove_worktree_for_issue "$issue_ref" "$parent_dir"
|
|
|
|
|
exit 1
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
echo "Forking session for '$issue_ref'..."
|
|
|
|
|
|
|
|
|
|
fix_session_permissions
|
|
|
|
|
|
|
|
|
|
if [ "$DEBUG_MODE" = true ]; then
|
|
|
|
|
(cd "$worktree_path" && opencode run "$full_message" --fork --session "$base_session_id" --dir "$worktree_path" 2>&1) | tee "$fork_log" &
|
|
|
|
|
else
|
|
|
|
|
(cd "$worktree_path" && opencode run "$full_message" --fork --session "$base_session_id" --dir "$worktree_path" 2>&1) >> "$fork_log" &
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local fork_pid=$!
|
|
|
|
|
|
|
|
|
|
local max_attempts=10
|
|
|
|
|
local attempt=1
|
|
|
|
|
local new_session_id=""
|
|
|
|
|
local fork_log_output=""
|
|
|
|
|
|
|
|
|
|
while [ $attempt -le $max_attempts ]; do
|
|
|
|
|
sleep 1
|
|
|
|
|
|
|
|
|
|
new_session_id=$(python3 -c "
|
|
|
|
|
import sqlite3
|
|
|
|
|
conn = sqlite3.connect('$opencode_db')
|
|
|
|
|
cursor = conn.cursor()
|
|
|
|
|
@@ -2024,31 +2137,31 @@ result = cursor.fetchone()
|
|
|
|
|
if result:
|
|
|
|
|
print(result[0])
|
|
|
|
|
" 2>/dev/null || echo "")
|
|
|
|
|
|
|
|
|
|
if [ -n "$new_session_id" ] && [ "$new_session_id" != "$base_session_id" ] && [ "$new_session_id" != "$pm_agent_session_id" ]; then
|
|
|
|
|
break
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
if ! kill -0 $fork_pid 2>/dev/null; then
|
|
|
|
|
fork_log_output=$(tail -20 "$fork_log" 2>/dev/null || echo "(log empty or unavailable)")
|
|
|
|
|
break
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
attempt=$((attempt + 1))
|
|
|
|
|
done
|
|
|
|
|
|
|
|
|
|
if [ -n "$new_session_id" ] && [ "$new_session_id" != "$base_session_id" ] && [ "$new_session_id" != "$pm_agent_session_id" ]; then
|
|
|
|
|
break
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
if ! kill -0 $fork_pid 2>/dev/null; then
|
|
|
|
|
fork_log_output=$(tail -20 "$fork_log" 2>/dev/null || echo "(log empty or unavailable)")
|
|
|
|
|
break
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
attempt=$((attempt + 1))
|
|
|
|
|
done
|
|
|
|
|
|
|
|
|
|
if [ -z "$new_session_id" ]; then
|
|
|
|
|
echo "Error: Could not find newly created session after ${max_attempts}s" >&2
|
|
|
|
|
if [ -n "$fork_log_output" ]; then
|
|
|
|
|
echo "Fork log output:" >&2
|
|
|
|
|
echo "$fork_log_output" >&2
|
|
|
|
|
if [ -z "$new_session_id" ]; then
|
|
|
|
|
echo "Error: Could not find newly created session after ${max_attempts}s" >&2
|
|
|
|
|
if [ -n "$fork_log_output" ]; then
|
|
|
|
|
echo "Fork log output:" >&2
|
|
|
|
|
echo "$fork_log_output" >&2
|
|
|
|
|
fi
|
|
|
|
|
remove_worktree_for_issue "$issue_ref"
|
|
|
|
|
exit 1
|
|
|
|
|
fi
|
|
|
|
|
remove_worktree_for_issue "$issue_ref"
|
|
|
|
|
exit 1
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
echo "Updating permissions for new session: $new_session_id"
|
|
|
|
|
python3 -c "
|
|
|
|
|
echo "Updating permissions for new session: $new_session_id"
|
|
|
|
|
python3 -c "
|
|
|
|
|
import sqlite3
|
|
|
|
|
conn = sqlite3.connect('$opencode_db')
|
|
|
|
|
cursor = conn.cursor()
|
|
|
|
|
@@ -2058,9 +2171,9 @@ conn.commit()
|
|
|
|
|
print('[OK] Session permissions updated')
|
|
|
|
|
"
|
|
|
|
|
|
|
|
|
|
if [ "$DEBUG_MODE" = true ]; then
|
|
|
|
|
echo "[DEBUG] Forked session permissions check:"
|
|
|
|
|
python3 -c "
|
|
|
|
|
if [ "$DEBUG_MODE" = true ]; then
|
|
|
|
|
echo "[DEBUG] Forked session permissions check:"
|
|
|
|
|
python3 -c "
|
|
|
|
|
import sqlite3
|
|
|
|
|
conn = sqlite3.connect('$opencode_db')
|
|
|
|
|
cursor = conn.cursor()
|
|
|
|
|
@@ -2070,11 +2183,11 @@ for row in cursor.fetchall():
|
|
|
|
|
print(' Directory:', row[1])
|
|
|
|
|
print(' Permission:', row[2])
|
|
|
|
|
" 2>/dev/null || echo " (failed to query DB)"
|
|
|
|
|
fi
|
|
|
|
|
fi
|
|
|
|
|
|
|
|
|
|
local branch_name=$(issue_ref_to_branch_name "$issue_ref")
|
|
|
|
|
|
|
|
|
|
python3 << PYEOF > "$SESSIONS_DIR/$session_file"
|
|
|
|
|
local branch_name=$(issue_ref_to_branch_name "$issue_ref")
|
|
|
|
|
|
|
|
|
|
python3 << PYEOF > "$SESSIONS_DIR/$session_file"
|
|
|
|
|
import json
|
|
|
|
|
|
|
|
|
|
session = {
|
|
|
|
|
@@ -2092,12 +2205,15 @@ with open("$SESSIONS_DIR/$session_file", "w") as f:
|
|
|
|
|
json.dump(session, f, indent=2)
|
|
|
|
|
PYEOF
|
|
|
|
|
|
|
|
|
|
add_issue_to_index "$issue_ref" "$session_file"
|
|
|
|
|
|
|
|
|
|
kugetsu_context_dump "$issue_ref" "$message" "$branch_name"
|
|
|
|
|
add_issue_to_index "$issue_ref" "$session_file"
|
|
|
|
|
|
|
|
|
|
kugetsu_context_dump "$issue_ref" "$message" "$branch_name"
|
|
|
|
|
|
|
|
|
|
kugetsu_add_notification "task_started" "Task started: $issue_ref" "$issue_ref"
|
|
|
|
|
|
|
|
|
|
echo "Session started for '$issue_ref': $new_session_id"
|
|
|
|
|
echo "Worktree: $worktree_path"
|
|
|
|
|
echo "Session started for '$issue_ref': $new_session_id"
|
|
|
|
|
echo "Worktree: $worktree_path"
|
|
|
|
|
) 200>"$lock_file"
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cmd_continue() {
|
|
|
|
|
|