Skip to content

Commit

Permalink
Update: Day-07 2024 Part1 and Part2 completed
Browse files Browse the repository at this point in the history
  • Loading branch information
suhasksv committed Dec 7, 2024
1 parent c30e96d commit 4f78d39
Show file tree
Hide file tree
Showing 26 changed files with 1,660 additions and 0 deletions.
13 changes: 13 additions & 0 deletions .idea/kotlinc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Binary file added 2024/day-03/Elixir.MulParser.beam
Binary file not shown.
60 changes: 60 additions & 0 deletions 2024/day-04/main1.py.BAK
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
def read_grid(file_path):
    """Load a character grid from *file_path*: one row per line, one cell per char."""
    with open(file_path) as fh:
        return [list(row.strip()) for row in fh]

def count_word_occurrences(grid, word):
    """Count every occurrence of *word* in *grid*, scanning all 8 directions.

    Each starting cell is probed in every compass direction; overlapping
    matches are all counted.
    """
    n_rows = len(grid)
    n_cols = len(grid[0]) if n_rows else 0

    # All 8 unit steps: horizontals, verticals, and both diagonals.
    steps = [(dr, dc) for dr in (-1, 0, 1) for dc in (-1, 0, 1) if (dr, dc) != (0, 0)]

    def matches(r, c, dr, dc):
        """True if *word* lies along (dr, dc) starting at (r, c)."""
        for k, ch in enumerate(word):
            rr, cc = r + k * dr, c + k * dc
            if not (0 <= rr < n_rows and 0 <= cc < n_cols) or grid[rr][cc] != ch:
                return False
        return True

    # bool sums as 0/1, so this totals the successful probes.
    return sum(
        matches(r, c, dr, dc)
        for r in range(n_rows)
        for c in range(n_cols)
        for dr, dc in steps
    )

if __name__ == "__main__":
    # Puzzle input is expected alongside the script under this fixed name.
    input_file = "input.txt"
    grid = read_grid(input_file)

    # Advent-of-Code day 4 searches for this exact word.
    target_word = "XMAS"
    total_occurrences = count_word_occurrences(grid, target_word)

    print(f"The word '{target_word}' appears {total_occurrences} times in the grid.")
52 changes: 52 additions & 0 deletions 2024/day-05/main1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import sys
import re
from collections import defaultdict, Counter, deque
import pyperclip as pc


def pr(s):
    """Print *s* and copy it to the clipboard for quick answer submission."""
    print(s)
    pc.copy(s)


sys.setrecursionlimit(10**6)
# BUG FIX: sys.arg does not exist — command-line arguments live in sys.argv.
# The original raised AttributeError before reading any input.
infile = sys.argv[1] if len(sys.argv) >= 2 else 'input.txt'
p1 = 0  # Part 1 answer
p2 = 0  # Part 2 answer
D = open(infile).read().strip()

# E[x] is the set of pages that must come before x
# ER[x] is the set of pages that must come after x
E = defaultdict(set)
ER = defaultdict(set)
edges, queries = D.split('\n\n')
for line in edges.split('\n'):
    x, y = line.split('|')
    x, y = int(x), int(y)
    E[y].add(x)
    ER[x].add(y)

for query in queries.split('\n'):
    vs = [int(x) for x in query.split(',')]
    assert len(vs) % 2 == 1  # puzzle guarantees an odd count, so a middle exists
    # An update is valid when no later page is a prerequisite of an earlier one.
    ok = True
    for i, x in enumerate(vs):
        for j, y in enumerate(vs):
            if i < j and y in E[x]:
                ok = False
    if ok:
        p1 += vs[len(vs) // 2]
    else:
        # Kahn's algorithm restricted to the pages of this query:
        # repeatedly emit a page whose in-query prerequisites are exhausted.
        good = []
        Q = deque([])
        D = {v: len(E[v] & set(vs)) for v in vs}  # in-degree within this query
        for v in vs:
            if D[v] == 0:
                Q.append(v)
        while Q:
            x = Q.popleft()
            good.append(x)
            for y in ER[x]:
                if y in D:
                    D[y] -= 1
                    if D[y] == 0:
                        Q.append(y)
        p2 += good[len(good) // 2]
pr(p1)
pr(p2)
73 changes: 73 additions & 0 deletions 2024/day-05/main2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
import sys
import re
from collections import defaultdict, deque
import pyperclip as pc

def pr(s):
    """Print *s* to stdout and mirror the same value onto the system clipboard."""
    print(s)
    pc.copy(s)

# Deep dependency chains could recurse heavily; raise the limit defensively.
sys.setrecursionlimit(10**6)

# First CLI argument overrides the default input file.
infile = sys.argv[1] if len(sys.argv) >= 2 else 'input.txt'
p1 = 0  # Part 1 answer
p2 = 0  # Part 2 answer
D = open(infile).read().strip()

E = defaultdict(set)   # E[x]: pages that must appear before x
ER = defaultdict(set)  # ER[x]: pages that must appear after x

# The input has two sections: ordering rules, then the page-number queries.
edges, queries = D.split('\n\n')

# Every rule "x|y" means x must precede y.
for line in edges.split('\n'):
    x, y = map(int, line.split('|'))
    E[y].add(x)
    ER[x].add(y)

for query in queries.split('\n'):
    vs = [int(x) for x in query.split(',')]
    assert len(vs) % 2 == 1  # an odd count guarantees a unique middle page

    # Valid iff no page is followed by one of its prerequisites.
    ordered = all(
        later not in E[x]
        for i, x in enumerate(vs)
        for later in vs[i + 1:]
    )

    if ordered:
        # Part 1 sums the middle page of every already-correct update.
        p1 += vs[len(vs) // 2]
    else:
        # Part 2: repair the order with Kahn's algorithm, restricted to the
        # pages of this query, then take the middle of the sorted result.
        sorted_pages = []
        Q = deque()
        pending = {v: len(E[v] & set(vs)) for v in vs}  # in-query in-degrees

        # Seed the queue with pages that have no unmet prerequisites.
        for v in vs:
            if pending[v] == 0:
                Q.append(v)

        while Q:
            cur = Q.popleft()
            sorted_pages.append(cur)
            for nxt in ER[cur]:
                if nxt in pending:
                    pending[nxt] -= 1
                    if pending[nxt] == 0:
                        Q.append(nxt)

        p2 += sorted_pages[len(sorted_pages) // 2]

# Emit both answers (printed and copied to the clipboard).
pr(p1)
pr(p2)
Loading

0 comments on commit 4f78d39

Please sign in to comment.