#!/usr/bin/env python3

# TODO: make sure things don't explode no matter what terminal size
# -> prevent line breaks in a single story
# -> only load as many stories as fit

import curses
import webbrowser
from dataclasses import dataclass

import requests
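
# Characters cycled through to animate the fetch spinner.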
spinner_states = ['-', '\\', '|', '/']


@dataclass
class Story:
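    """One top story, mapped from the fields of a Hacker News API item."""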
    id: int
    title: str
    link: str
    author: str
    votes: int
    comments: int


def main(stdscr):
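    """Fetch the top Hacker News stories and let the user browse them."""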
    stdscr.clear()

    height, width = stdscr.getmaxyx()
    num_stories = height - 2  # leave room for the header line and the detail row

    # Ask the Hacker News API for the IDs of the current top stories.
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
    r = requests.get(url, timeout=10)
    if not r.ok:
        raise RuntimeError('Error fetching data from Hacker News API')
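    # topstories.json returns up to 500 IDs; take at most ten, and no more than fit on screen.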
    ids = r.json()[:min(10, num_stories)]

    stories = []
    for idx, i in enumerate(ids):
        # Redraw the spinner so the user sees progress while items load.
        stdscr.clear()
        stdscr.addstr(f'[{spinner_states[idx % 4]}] Getting stories...')
        stdscr.refresh()
        story_url = f'https://hacker-news.firebaseio.com/v0/item/{i}.json'
        s = requests.get(story_url, timeout=10).json()
        stories.append(Story(s['id'],
                             s['title'],
                             s.get('url', 'No URL'),  # Ask HN items carry no external URL
                             s['by'],
                             s['score'],
                             len(s.get('kids', []))))  # 'kids' lists top-level comments only

    # Display the story list; arrow keys move the selection, q quits,
    # Enter opens the story, and c opens its comment thread.
    current_pos = 0
    while True:
        stdscr.clear()
        stdscr.addstr('Hacker News Top Stories:\n')
        for i, story in enumerate(stories):
            # Pad the unselected prefix to the selected one's width so titles align.
            prefix = '>>> ' if i == current_pos else '    '

            # Budget the line: prefix, title, ' (', link, ')' and the trailing
            # newline must all fit within the terminal width.
            chars_available = width - len(f'{prefix} ()\n')
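            # Give the title up to two thirds of the budget and the URL the rest.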
            max_title_len = min((chars_available // 3) * 2, len(story.title))
            max_url_len = chars_available - max_title_len

            # Truncate the title and the scheme-stripped link with an ellipsis.
            title = story.title[:max_title_len - 1] + '…' if len(story.title) > max_title_len else story.title
            link = story.link.replace('https://', '').replace('http://', '')
            link = link[:max_url_len - 1] + '…' if len(link) > max_url_len else link

            stdscr.addstr(f'{prefix}{title} ({link})\n')
            if i == current_pos:
                # The selected story gets an extra detail row underneath it.
                detail = f'    by {story.author} | {story.comments} comments | {story.votes} points\n'
                stdscr.addstr(detail)

        stdscr.refresh()
        c = stdscr.getch()
        if c == ord('q'):  # Quit
            break
        elif c == curses.KEY_UP:
            current_pos -= 1
            if current_pos < 0:
                current_pos = len(stories) - 1  # wrap to the bottom
        elif c == curses.KEY_DOWN:
            current_pos += 1
            if current_pos >= len(stories):
                current_pos = 0  # wrap back to the top
        elif c == ord('c'):  # Open the comment thread on Hacker News
            webbrowser.open(f'https://news.ycombinator.com/item?id={stories[current_pos].id}')
        elif c == curses.KEY_ENTER or c == 10:
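            # Many terminals send a plain newline (10) instead of KEY_ENTER.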
            webbrowser.open(stories[current_pos].link)


if __name__ == '__main__':
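    # curses.wrapper initializes the screen and restores the terminal on exit or error.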
    curses.wrapper(main)