You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
105 lines
3.4 KiB
Python
105 lines
3.4 KiB
Python
#!/usr/bin/env python3
|
|
|
|
#TODO: make sure things don't explode no matter the terminal size
|
|
# -> prevent linebreaks in a single story
|
|
# -> only load as many stories as fit
|
|
|
|
import requests
|
|
from bs4 import BeautifulSoup as Soup
|
|
import curses
|
|
import webbrowser
|
|
|
|
from dataclasses import dataclass
|
|
|
|
# Frames of the loading spinner, cycled with idx % 4 while stories are fetched.
spinner_states = ['-', '\\', '|', '/']
|
|
|
|
@dataclass
class Story:
    """One Hacker News story, as built by get_story() from the item API."""
    id: int        # HN item id (used to build the comments-page URL)
    title: str
    link: str      # story URL, or the literal 'No URL' when the item has none
    author: str    # HN username (the API's 'by' field)
    votes: int     # the API's 'score' field
    comments: int  # number of direct replies (len of 'kids'), 0 when absent
|
|
|
|
def get_topstories():
    """Fetch the ids of the current top stories from the Hacker News API.

    Returns:
        list[int]: story ids, best first (the API returns up to 500,
        so quite enough).

    Raises:
        Exception: if the HTTP response is not a success status.
    """
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json'
    # A timeout keeps the UI from hanging forever if the API is unreachable.
    r = requests.get(url, timeout=10)
    if not r.ok:
        raise Exception('Error fetching data from Hacker News API')
    return r.json()
|
|
|
|
def get_story(story_id):
    """Fetch one item from the Hacker News API and wrap it in a Story.

    Args:
        story_id: numeric HN item id.

    Returns:
        Story: link is the literal 'No URL' when the item carries no
        'url' field (e.g. Ask HN posts); comments is 0 when there are
        no 'kids'.
    """
    story_url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json'
    # Timeout so one dead connection cannot freeze the loading screen.
    s = requests.get(story_url, timeout=10).json()
    return Story(s['id'],
                 s['title'],
                 s.get('url', 'No URL'),   # text posts have no 'url' key
                 s['by'],
                 s['score'],
                 len(s.get('kids', [])))   # 'kids' absent when no comments
|
|
|
|
|
|
def footer(stdscr, content):
    """Draw *content* highlighted (reverse video) on the bottom screen line."""
    try:
        stdscr.addstr(curses.LINES-1, 0, content, curses.A_REVERSE)
    except curses.error:
        # addstr raises when text runs into the bottom-right cell (narrow
        # terminals); a clipped footer is better than a crash.
        pass
|
|
|
|
|
|
def main(stdscr):
|
|
stdscr.clear()
|
|
|
|
_, width = stdscr.getmaxyx();
|
|
|
|
num_stories = curses.LINES - 3 # headline, detail, footer
|
|
topstories = get_topstories()
|
|
stories = []
|
|
for idx, i in enumerate(topstories[:num_stories]):
|
|
stdscr.clear()
|
|
footer(stdscr, f'[{spinner_states[idx%4]}] Getting stories...')
|
|
stdscr.refresh()
|
|
stories.append(get_story(i))
|
|
|
|
# Display list of stories in terminal window with arrow key navigation
|
|
current_pos = 0
|
|
while True:
|
|
stdscr.clear()
|
|
stdscr.addstr('Hacker News Top Stories:\n')
|
|
for i, story in enumerate(stories):
|
|
prefix = '>>> ' if i == current_pos else ' '
|
|
# calculate length of line
|
|
text = f'{prefix} ()\n'
|
|
chars_available = width - len(text)
|
|
max_title_len = min((chars_available//3)*2, len(story.title))
|
|
max_url_len = chars_available - max_title_len
|
|
|
|
title = story.title[:max_title_len-1] + "…" if len(story.title) > max_title_len else story.title
|
|
link = story.link.replace('https://', '').replace('http://', '')
|
|
link = link[:max_url_len-1] + "…" if len(link) > max_url_len else link
|
|
|
|
text = '{}{} ({})\n'.format(prefix, title, link.replace('https://', '').replace('http://', ''))
|
|
stdscr.addstr(text)
|
|
if i == current_pos:
|
|
detail = f' by {story.author} | {story.comments} comments | {story.votes} points\n'
|
|
stdscr.addstr(detail)
|
|
footer(stdscr, f'Loaded {num_stories} stories.')
|
|
|
|
stdscr.refresh()
|
|
c = stdscr.getch()
|
|
if c == ord('q'): # Quit
|
|
break
|
|
elif c == curses.KEY_UP:
|
|
current_pos -= 1
|
|
if current_pos < 0:
|
|
current_pos = len(stories)-1
|
|
elif c == curses.KEY_DOWN:
|
|
current_pos += 1
|
|
if current_pos >= len(stories):
|
|
current_pos = 0
|
|
elif c == ord('c'):
|
|
webbrowser.open(f'https://news.ycombinator.com/item?id={stories[current_pos].id}')
|
|
elif c == curses.KEY_ENTER or c == 10:
|
|
webbrowser.open(stories[current_pos].link)
|
|
|
|
if __name__ == '__main__':
    # wrapper() restores the terminal state even if main() raises.
    curses.wrapper(main)
|