from functools import cache

with open('input.txt', 'r') as file:
    data: list[str] = file.readlines()

# First line of the input: the available towel patterns, comma-separated.
parts: list[str] = data[0].strip().split(', ')

# Group the patterns by their first character so that, when matching, we only
# try patterns that could possibly sit at the front of a design.
starting_letter: dict[str, list[str]] = {}

for part in parts:
    first_char = part[0]

    if first_char not in starting_letter:
        starting_letter[first_char] = [part]
    else:
        starting_letter[first_char].append(part)

# Everything after the blank separator line is a design to check.
designs = [design.strip() for design in data[2:]]


# This solution is pretty slow but it works. Some variation of KMP would
# probably be faster, e.g. a graph-based search with a large amount of
# pre-processing (see the sketch at the bottom of this file). @cache already
# memoizes the search on remaining suffixes, which keeps it tractable.
@cache
def search_designs(design: str) -> bool:
    # An empty remainder means the whole design was covered by patterns.
    if not design:
        return True

    first_char = design[0]

    if first_char not in starting_letter:
        return False

    possible_matches = starting_letter[first_char]

    possible = []
    for possible_match in possible_matches:
        if design.startswith(possible_match):
            # Strip only the matched prefix. str.replace would delete every
            # occurrence of the pattern, not just the leading one.
            possible.append(design[len(possible_match):])

    return any(search_designs(m) for m in possible)


possible_count = 0

for i, design in enumerate(designs):
    if search_designs(design):
        print(f'Design {i} finished search')
        possible_count += 1

print(f'Possible Designs: {possible_count}')
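

# A possible alternative to the recursive search above: a bottom-up DP over
# character positions (the classic "word break" approach). This is a minimal
# sketch and not part of the solution above; `can_build` is a hypothetical
# helper, and it assumes the same `parts` list of patterns read in at the top.
def can_build(design: str, patterns: list[str]) -> bool:
    # reachable[i] is True when design[:i] can be assembled from the patterns.
    reachable = [False] * (len(design) + 1)
    reachable[0] = True  # the empty prefix needs no patterns

    for i in range(1, len(design) + 1):
        for pattern in patterns:
            if (i >= len(pattern)
                    and reachable[i - len(pattern)]
                    and design[i - len(pattern):i] == pattern):
                reachable[i] = True
                break

    return reachable[len(design)]

# If those assumptions hold, this should agree with the recursive version:
#     sum(can_build(design, parts) for design in designs) == possible_count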