Initial commit - gophercgis - Collection of gopher CGI/DCGI for geomyidae
(HTM) hg clone https://bitbucket.org/iamleot/gophercgis
---
(DIR) changeset 312563f7eecdf87ca7ade4dbb71161ddb11c826d
(HTM) Author: Leonardo Taccari <iamleot@gmail.com>
Date: Tue, 7 Aug 2018 09:56:04 +0200
Initial commit
Diffstat:
LICENSE | 24 ++++++
feeds/feed.dcgi | 3 +
feeds/feed.py | 33 ++++++++
instagram/comments.dcgi | 20 +++++
instagram/user.dcgi | 106 +++++++++++++++++++++++++++
lobsters/comments.dcgi | 100 ++++++++++++++++++++++++++
lobsters/lobsters.dcgi | 40 ++++++++++
nntp/nntp.dcgi | 182 ++++++++++++++++++++++++++++++++++++++++++++++++
twitter/tweets.dcgi | 3 +
twitter/tweets.py | 133 +++++++++++++++++++++++++++++++++++
youtube/channel.dcgi | 90 +++++++++++++++++++++++
11 files changed, 734 insertions(+), 0 deletions(-)
---
diff -r 000000000000 -r 312563f7eecd LICENSE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/LICENSE Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,24 @@
+Copyright (c) 2018 Leonardo Taccari
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff -r 000000000000 -r 312563f7eecd feeds/feed.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/feeds/feed.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+URL="$2" PYTHONIOENCODING="utf-8" /usr/pkg/bin/python2.7 feed.py
diff -r 000000000000 -r 312563f7eecd feeds/feed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/feeds/feed.py Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,33 @@
+#!/usr/pkg/bin/python2.7
+
+from __future__ import print_function, unicode_literals
+import feedparser
+
+
+def header(f):
+ print('t')
+ print('t{title}'.format(**f['feed']))
+ print('t')
+ print('t')
+
+
+def entry(e):
+ desc = e.get('title', '').replace('|', '\|')
+ if e.get('author'):
+ desc = desc + ' - ' + e.get('author', '').replace('|', '\|')
+ print('[h|{desc}|URL:{link}|server|port]'.format(
+ desc=desc,
+ link=e.get('link', '').replace('|', '\|')))
+ if e.get('published'):
+ print('t{published}'.format(published=e.get('published', '')))
+ print('t')
+
+
+if __name__ == '__main__':
+ import os
+
+ url = os.getenv('URL')
+ feed = feedparser.parse(url)
+ header(feed)
+ for e in feed['entries']:
+ entry(e)
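
The scripts in this commit all follow the same geomyidae conventions that feed.dcgi and feed.py show in miniature: the query part of the selector (after `?') reaches the dcgi as "$2", plain text lines are written with a leading `t' so they can never be parsed as items, and item lines use the `[type|description|selector|server|port]' form with the literal words `server' and `port' left for geomyidae to fill in. A minimal standalone sketch of such a dcgi (selector and wording invented for illustration):

#!/bin/sh
#
# Minimal dcgi sketch.  Assumes geomyidae hands the part of the selector
# after `?' to the script as $2, as every script in this commit does, e.g.
# /cgi/feeds/feed.dcgi?https://example.org/feed.xml
query="$2"

echo "t"                                  # blank text line
echo "tYou asked for: ${query}"           # plain text, kept literal by `t'
echo "t"
echo "[h|Open in a browser|URL:${query}|server|port]"    # web link item

feed.dcgi itself only forwards "$2" to feed.py through the URL environment variable, keeping the Python side independent of the gopher server.
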
diff -r 000000000000 -r 312563f7eecd instagram/comments.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/instagram/comments.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+CGI="/cgi/instagram/user.dcgi"
+
+
+media="$2"
+
+
+/usr/bin/ftp -V -o - "https://www.instagram.com/p/${media}/" |
+sed -ne 's/;<\/script>$//' -e '/sharedData =/s/^.*sharedData = //p' |
+/usr/pkg/bin/jq -r '
+.entry_data.PostPage[].graphql.shortcode_media |
+ .edge_media_to_comment | .edges[] |
+ ( "[1|" + "@" +
+ ( .node.owner.username | gsub("\\|"; "\\|") ) + "|" +
+ "'"${CGI}?"'" + ( .node.owner.username | gsub("\\|"; "\\|") ) +
+ "|server|port]",
+ "t" + .node.text,
+ "t" )
+'
diff -r 000000000000 -r 312563f7eecd instagram/user.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/instagram/user.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,106 @@
+#!/bin/sh
+
+
+CGI="/cgi/instagram/user.dcgi"
+
+
+case "$2" in
+*/*/*/*)
+ # user, id, rhx_gis and end_cursor are provided (next pages) (`data')
+ IFS=/ set -- "$2"
+ set -- $*
+ user="$1"
+ id="$2"
+ rhx_gis="$3"
+ end_cursor="$4"
+ ;;
+*)
+	# only the user is provided (the first, i.e. profile, page) (`profile')
+ user="$2"
+ ;;
+esac
+
+
+profile_header='
+.entry_data.ProfilePage[] | .graphql.user |
+ "t",
+ "t@" + .username + " - " + .full_name,
+ "t",
+ "t" + .biography,
+ "t",
+ "tFollowed by: " + ( .edge_followed_by.count | tostring ),
+ "tFollows: " + ( .edge_follow.count | tostring ),
+ "t",
+'
+
+data_header='
+.data.user |
+ "t",
+ "t@" + "'"${user}"'",
+ "t",
+'
+
+#
+# FIXME: The `Next page' should be added only if there is really a next page!
+#
+profile_footer='
+,
+"[1|>> Next page|'"${CGI}?${user}/"'" +
+ .entry_data.ProfilePage[].graphql.user.id + "/" +
+ .rhx_gis + "/" +
+ .entry_data.ProfilePage[].graphql.user.edge_owner_to_timeline_media.page_info.end_cursor +
+ "|server|port]",
+"t"
+'
+
+#
+# FIXME: The `Next page' should be added only if there is really a next page!
+#
+data_footer='
+,
+"[1|>> Next page|'"${CGI}?${user}/${id}/${rhx_gis}/"'" +
+ .data.user.edge_owner_to_timeline_media.page_info.end_cursor +
+ "|server|port]",
+"t"
+'
+
+
+if [ "${rhx_gis}" ] && [ "${end_cursor}" ]; then
+ header="${data_header}"
+ footer="${data_footer}"
+ variables='{"id":"'"${id}"'","first":12,"after":"'"${end_cursor}"'"}'
+ url="https://www.instagram.com/graphql/query/?query_hash=bd0d6d184eefd4d0ce7036c11ae58ed9&variables=${variables}"
+
+ #
+ # XXX: This was described in:
+ # XXX: <https://stackoverflow.com/questions/49786980/>
+ #
+ gis=$(printf "%s" "${rhx_gis}:${variables}" | md5 -q)
+
+ fetch_cmd() {
+ /usr/pkg/bin/curl -H "X-Instagram-GIS: ${gis}" -sL -g "${url}"
+ }
+else
+ header="${profile_header}"
+ footer="${profile_footer}"
+ url="https://www.instagram.com/${user}/"
+ fetch_cmd() {
+ /usr/pkg/bin/curl -sL -g "${url}" |
+ sed -ne 's/;<\/script>$//' -e '/sharedData =/s/^.*sharedData = //p'
+ }
+fi
+
+fetch_cmd |
+/usr/pkg/bin/jq -r '
+(
+'"${header}"'
+ ( .edge_owner_to_timeline_media.edges[] | .node |
+ (( "[h|[+]|" + "URL:" + .display_url + "|server|port]" ),
+ ( .edge_media_to_caption.edges[] | "t" + .node.text )),
+ "tLikes: " + ( .edge_media_preview_like.count | tostring ),
+ "[h|Comments: " + ( .edge_media_to_comment.count | tostring ) + "|" +
+ "URL:" + "https://www.instagram.com/p/" + .shortcode + "|server|port]",
+ "t" )
+)
+'"${footer}"'
+'
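
The signed GraphQL requests above attach an X-Instagram-GIS header computed as the MD5 digest of the rhx_gis token and the JSON-encoded variables joined by a colon, as the referenced StackOverflow answer describes. A standalone sketch of the same computation, with an md5sum(1) fallback for systems that lack the BSD md5(1) used above; both input values are placeholders, not real Instagram data:

#!/bin/sh
# Recompute the X-Instagram-GIS signature outside the dcgi, for debugging.

rhx_gis="0123456789abcdef0123456789abcdef"        # placeholder token
variables='{"id":"42","first":12,"after":"CURSOR"}'

if command -v md5 >/dev/null 2>&1; then
	gis=$(printf "%s" "${rhx_gis}:${variables}" | md5 -q)
else
	gis=$(printf "%s" "${rhx_gis}:${variables}" | md5sum | awk '{ print $1 }')
fi

echo "X-Instagram-GIS: ${gis}"
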
diff -r 000000000000 -r 312563f7eecd lobsters/comments.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lobsters/comments.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,100 @@
+#!/bin/sh
+
+short_id="$2"
+
+/usr/bin/ftp -V -o - "https://lobste.rs/s/${short_id}.json" |
+/usr/pkg/bin/jq -r '
+.title + "\t" + .url + "\t" + ( .score | tostring ) + "\t" +
+ ( .created_at[0:19] + "Z" | sub("T"; " ") | sub("Z"; "") | sub(":[0-9]+$"; "") ) + "\t" +
+ .submitter_user.username + "\u001e",
+ ( .comments[] |
+ .commenting_user.username + "\u001c" +
+ ( .indent_level | tostring ) + "\u001c" +
+ ( .comment ) + "\u001c" +
+ ( .url ) + "\u001e" )
+' | awk '
+
+BEGIN {
+ FS = "\034"
+ RS = "\036"
+}
+
+function html2text(s) {
+ gsub(/<\/a>/, "", s)
+ gsub(/<\/code>/, "", s)
+ gsub(/<\/em>/, "", s)
+ gsub(/<\/li>/, "", s)
+ gsub(/<\/ol>/, "", s)
+ gsub(/<\/ul>/, "", s)
+ gsub(/<blockquote>(<p>)?/, "\n«", s)
+ gsub(/(<\/p>)?<\/blockquote>/, "»\n", s)
+ gsub(/<\/p>/, "\n", s)
+ gsub(/<hr>/, "", s)
+ gsub(/<p>/, "\n", s)
+ gsub(/<ol>/, "", s)
+ gsub(/<ul>/, "", s)
+ gsub(/<li>/, "\n - ", s)
+ gsub(/<a href="[^"]+" rel="nofollow">/, "", s)
+ gsub(/<code>/, "", s)
+ gsub(/<em>/, "", s)
+ return s
+}
+
+function print_title(title, url, score, created_at, username) {
+ gsub("\\|", "\\|", title)
+
+ print ""
+ print "Lobsters - Comments"
+ print ""
+ print "[h|" title "|" "URL:" url "|server|port]"
+ print "via " username " " created_at " " "(" score ")"
+ print ""
+}
+
+function print_comment(comment, i) {
+ fmt_cmd = "/usr/pkg/bin/par -d 0 -B=. -w 80"
+ printf("[h|%s|URL:%s|server|port]\n",
+ comment[i, "username"] " writes:",
+ comment[i, "url"])
+ print comment[i, "comment"] | fmt_cmd
+ close(fmt_cmd)
+ print ""
+}
+
+{
+ # get rid of leading and trailing newlines
+ gsub(/^\n/, "")
+ gsub(/\n$/, "")
+
+ # XXX: get rid of all newlines otherwise the output is messed up
+ gsub(/\n/, "")
+}
+
+# Title, etc. TSV
+NR == 1 {
+ split($0, fields, "\t")
+ title = fields[1]
+ url = fields[2]
+ score = fields[3]
+ created_at = fields[4]
+ username = fields[5]
+
+ print_title(title, url, score, created_at, username)
+ next
+}
+
+NF == 4 {
+ n++
+ comment[n, "username"] = $1
+ comment[n, "indent_level"] = $2
+ comment[n, "comment"] = html2text($3)
+ comment[n, "url"] = $4
+ next
+}
+
+END {
+ for (i = 1; i <= n; i++) {
+ print_comment(comment, i)
+ }
+}
+' | sed -e '/^\[[0137Ighi]\|/n' -e 's/^/t/'
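
The jq/awk pipeline above sidesteps quoting problems by using the ASCII field and record separator bytes: jq joins the pieces of every comment with "\u001c" and ends each record with "\u001e", and awk reads them back with FS = "\034" and RS = "\036" (the same bytes written in octal). A self-contained sketch of that hand-off, with made-up input:

#!/bin/sh
# Pass structured data from jq to awk through control bytes, as
# comments.dcgi does above.  The JSON below is invented for the example.

printf '%s\n' \
	'{"user":"alice","text":"pipes | are | fine"}' \
	'{"user":"bob","text":"ok"}' |
jq -r '.user + "\u001c" + .text + "\u001e"' |
awk '
BEGIN {
	FS = "\034"
	RS = "\036"
}

{
	# drop the newline jq printed after the previous record
	gsub(/^\n/, "")
}

NF == 2 {
	printf("%s writes: %s\n", $1, $2)
}
'
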
diff -r 000000000000 -r 312563f7eecd lobsters/lobsters.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lobsters/lobsters.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+CGI="/cgi/lobsters/lobsters.dcgi"
+COMMENTS_CGI="/cgi/lobsters/comments.dcgi"
+
+page="$2"
+case "${page}" in
+page/[0-9]*) page_n=${page##page/} ;;
+newest) ;;
+hottest) ;;
+*) page="page/1" page_n=1 ;;
+esac
+
+echo "t"
+echo "tLobsters"
+echo "t"
+
+/usr/bin/ftp -V -o - "https://lobste.rs/${page}.json" |
+/usr/pkg/bin/jq -r '
+.[] |
+"[h|" + ( .title | gsub("\\|"; "\\|") ) + "|" + "URL:" + .url + "|server|port]",
+"tvia " + .submitter_user.username + " " +
+ ( .created_at[0:19] + "Z" | sub("T"; " ") | sub("Z"; "") | sub(":[0-9]+$"; "") ) +
+ " " + ( .tags | join(", ") ) + " " + "(" + ( .score | tostring ) + ")",
+"[1|Comments: " + ( .comment_count | tostring ) + "|" + "'"${COMMENTS_CGI}"'?" + .short_id + "|server|port]",
+"t"
+'
+
+if [ -n "${page_n}" ] && [ "${page_n}" -gt 1 ]; then
+ prev_page_n=$((page_n - 1))
+ prev_page="page/${prev_page_n}"
+ echo "[1|<< Page ${prev_page_n}|${CGI}?${prev_page}|server|port]"
+fi
+
+if [ -n "${page_n}" ] && [ "${page_n}" -ge 1 ]; then
+ next_page_n=$((page_n + 1))
+ next_page="page/${next_page_n}"
+ echo "[1|>> Page ${next_page_n}|${CGI}?${next_page}|server|port]"
+ echo "t"
+fi
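
Nothing in this commit links the scripts together yet: something has to point at their selectors from a gph menu. A hypothetical top-level index.dcgi for the Lobsters scripts, reusing the CGI paths hard-coded above (they would have to match the actual install prefix):

#!/bin/sh
# Hypothetical index menu for the Lobsters scripts in this commit.

echo "t"
echo "tgophercgis"
echo "t"
echo "[1|Lobsters front page|/cgi/lobsters/lobsters.dcgi|server|port]"
echo "[1|Lobsters newest|/cgi/lobsters/lobsters.dcgi?newest|server|port]"
echo "[1|Lobsters hottest|/cgi/lobsters/lobsters.dcgi?hottest|server|port]"
echo "t"
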
diff -r 000000000000 -r 312563f7eecd nntp/nntp.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/nntp/nntp.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,182 @@
+#!/bin/sh
+
+CGI="/cgi/nntp/nntp.dcgi"
+NNTP_MESSAGES=100	# default number of articles to OVER (the range spans NNTP_MESSAGES + 1)
+NNTP_SERVER="news.gmane.org"
+NNTP_PORT="119"
+NNTP_CMD="nc ${NNTP_SERVER} ${NNTP_PORT}"
+
+
+case "$2" in
+*/[0-9]*/[0-9]*)
+ # OVER (scan) of articles num1...num2
+ group="${2%%/*}"
+ nums="${2#*/}" ; num1="${nums%%/*}" ; num2="${nums##*/}"
+ ;;
+*/[0-9]*)
+ # BODY (show) of article `msg'
+ group="${2%%/*}"
+ msg="${2##*/}"
+ ;;
+*)
+ # OVER (scan) of last 10
+ group="$2"
+ ;;
+esac
+
+
+#
+# GROUP and print the information about the selected group, i.e.:
+# `211 <numbers> <first> <last> <group_name>'
+#
+nntp_group()
+{
+ g="$1"
+
+ { echo "group ${g}"; echo "quit"; } |
+ ${NNTP_CMD} |
+ sed -ne '/^211/p'
+}
+
+
+#
+# OVER and gph-ize the output, similar to a mailx(1) header listing.
+#
+nntp_over()
+{
+ g="$1"
+ range="$2"
+
+ { echo "group ${g}"; echo "over ${range}"; echo "quit"; } |
+ ${NNTP_CMD} |
+ awk '
+ BEGIN {
+ FS = "\t"
+ over = 1
+ }
+
+ /^\.\r$/ {
+ over = 0
+ next
+ }
+
+ NR > 3 && over {
+ id = $1
+ subject = $2
+ from = $3
+ date = $4
+
+ gsub("\\|", "\\|", subject)
+ gsub("\\|", "\\|", from)
+ gsub("\\|", "\\|", date)
+
+ # Ignore seconds and TZ
+ sub(/:[0-9][0-9] [+-][0-9]+$/, "", date)
+
+ printf("[1| %d %-17s %-22s %s|%s/%d|server|port]\n",
+ id, substr(from, 1, 17), substr(date, 1, 22), subject,
+ "'"${CGI}"?"${g}"'", id)
+ }
+ ' | tail -r
+
+}
+
+
+#
+# BODY (and headers) of the selected `msg', properly gph-ized.
+#
+nntp_body()
+{
+ g="$1"
+ msg="$2"
+
+ { echo "group ${g}"; echo "over ${msg}"; echo "body ${msg}"; echo "quit"; } |
+ ${NNTP_CMD} |
+ awk '
+ BEGIN {
+ FS = "\t"
+ body = 1
+ over = 1
+ }
+
+ /^\.\r$/ {
+ if (over && body) {
+ over = 0
+ } else {
+ body = 0
+ }
+ next
+ }
+
+ NR > 3 && over {
+ id = $1
+ subject = $2
+ from = $3
+ date = $4
+ print "t" "Date: " date
+ print "t" "To: " "'"${g}"'"
+ print "t" "From: " from
+ print "t" "Subject: " subject
+ print "t" ""
+ }
+
+ NR > 6 && body {
+ gsub("\t", " ")
+ print "t" $0
+ }
+ '
+}
+
+
+#
+# <group>/<msg>: show the article
+#
+if [ "${msg}" ] && [ "${msg}" -gt 0 ]; then
+ echo "t"
+ nntp_body "${group}" "${msg}"
+ echo "[1|<< ${group}|${CGI}?${group}|server|port]"
+ echo "t"
+ exit 0
+fi
+
+
+#
+# <group>: show ID, From:, Date: and Subject: of the most recent NNTP_MESSAGES
+# <group>/<num1>/<num2>: show ID, From:, Date: and Subject: from <num1> to
+# <num2>
+#
+if [ "${group}" ]; then
+ set -- `nntp_group "${group}"`
+ n="$2"; first="$3"; last="$4"
+
+ : ${num1:=$((last - NNTP_MESSAGES))}
+ : ${num2:=${last}}
+
+ [ "${num1}" -le 0 ] && num1=1
+
+ echo "t"
+ echo "t${group} (${num1}-${num2})"
+ echo "t"
+ nntp_over "${group}" "${num1}-${num2}"
+ echo "t"
+
+ if [ "${num2}" -gt "${NNTP_MESSAGES}" ]; then
+ prev_num1=$((num1 - NNTP_MESSAGES))
+ [ "${prev_num1}" -le 0 ] && prev_num1=1
+ prev_num2=$((num2 - NNTP_MESSAGES))
+
+ echo "[1|<< Older articles|${CGI}?${group}/${prev_num1}/${prev_num2}|server|port]"
+ echo "t"
+ fi
+
+ if [ "${num2}" -lt "${last}" ]; then
+ new_num1=$((num1 + NNTP_MESSAGES))
+ new_num2=$((num2 + NNTP_MESSAGES))
+ [ "${new_num2}" -gt "${last}" ] && new_num2="${last}"
+
+ echo "[1|>> Newer articles|${CGI}?${group}/${new_num1}/${new_num2}|server|port]"
+ echo "t"
+ fi
+
+ exit 0
+fi
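
Everything nntp.dcgi does rests on three plain NNTP commands piped through nc(1): GROUP (answered with `211 <numbers> <first> <last> <group_name>' as noted above), OVER (one tab-separated line per article) and BODY, each multi-line response ending with a line holding a single dot. The raw exchange can be replayed by hand; the server and group below are only examples and the server has to be reachable:

#!/bin/sh
# Replay the raw NNTP exchange that nntp_group() and nntp_over() build on.

NNTP_SERVER="news.gmane.org"              # example server, as in the script
NNTP_PORT="119"
group="gmane.os.netbsd.current"           # example group

{
	echo "group ${group}"
	echo "over 1-5"
	echo "quit"
} | nc "${NNTP_SERVER}" "${NNTP_PORT}"
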
diff -r 000000000000 -r 312563f7eecd twitter/tweets.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/twitter/tweets.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+CGI="/cgi/twitter/tweets.dcgi" TWEET_USER="$2" PYTHONIOENCODING="utf-8" /usr/pkg/bin/python2.7 tweets.py
diff -r 000000000000 -r 312563f7eecd twitter/tweets.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/twitter/tweets.py Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,133 @@
+#!/usr/pkg/bin/python2.7
+
+from __future__ import print_function, unicode_literals
+import textwrap
+import re
+import urllib
+from bs4 import BeautifulSoup
+
+
+TWEETS_BASE = 'https://mobile.twitter.com'
+
+
+def get_tweets(user):
+ r = urllib.urlopen(TWEETS_BASE + '/' + user)
+ return BeautifulSoup(r, 'html.parser')
+
+
+def parse_tweets(bs):
+ ts = []
+ for t in bs.find_all('table', class_='main-tweet') + bs.find_all('table', class_='tweet'):
+ fullname = t.find(class_='fullname').text.strip()
+ username = t.find(class_='username').text.strip()
+ if t.find(class_='timestamp'):
+ timestamp = t.find(class_='timestamp').text.strip()
+ else:
+ timestamp = ''
+ if t.find(class_='tweet-reply-context'):
+ context = t.find(class_='tweet-reply-context').text.strip()
+ context = context.replace('\n', ' ')
+ context = ' '.join(context.split()) # Get rid of extra blanks
+ if t.find('a', text='View conversation'):
+ context_url = t.find('a', text='View conversation')['href']
+ context_url = context_url.lstrip('/').replace('?p=v', '')
+ else:
+ context_url = None
+ elif t.find(class_='tweet-social-context'):
+ context = t.find(class_='tweet-social-context').text.strip()
+ context = context.replace('\n', ' ')
+ context = ' '.join(context.split()) # Get rid of extra blanks
+ context_url = None
+ else:
+ context = None
+ context_url = None
+ text = parse_tweet_text(t.find(class_='tweet-text'))
+ url = username.replace('@', '') + '/status/' + \
+ t.find(class_='tweet-text').get('data-id', '')
+ ts.append({
+ 'fullname': fullname,
+ 'username': username,
+ 'timestamp': timestamp,
+ 'url': url,
+ 'context': context,
+ 'context_url': context_url,
+ 'text': text,
+ })
+
+ return ts
+
+
+def parse_tweet_text(tweet_text):
+ # Expand URLs
+ for a in tweet_text.find_all('a', class_='twitter_external_link'):
+ a.replace_with(a['data-url'])
+
+ return tweet_text.text.strip()
+
+
+def header(bs):
+ fullname = bs.find(class_='fullname').text.strip()
+ username = bs.find(class_='username').text.replace('\n', '').strip()
+ url = bs.find('link', rel='canonical')['href']
+ print('t')
+ print('[h|{fullname} {username}|URL:{url}|server|port]'.format(
+ fullname=fullname.replace('|', '\|'),
+ username=username.replace('|', '\|'),
+ url=url.replace('|', '\|'),
+ ))
+ print('t')
+
+
+def more(bs, cgi):
+ if bs.find(class_='w-button-more'):
+ more = bs.find(class_='w-button-more').a['href'].replace('/', '')
+ print('[1|Older tweets|{cgi}?{more}|server|port]'.format(
+ cgi=cgi,
+ more=more,
+ ))
+ print('t')
+
+
+def tweet(t, cgi):
+ print('[1|{fullname} {username} {timestamp}|{cgi}?{url}|server|port]'.format(
+ cgi=cgi,
+ fullname=t['fullname'].replace('|', '\|'),
+ username=t['username'].replace('|', '\|'),
+ timestamp=t['timestamp'].replace('|', '\|'),
+ url=t['url'].replace('|', '\|'),
+ ))
+ if t['context']:
+ if t['context_url']:
+ print('[1|{context}|{cgi}?{url}|server|port]'.format(
+ cgi=cgi,
+ context=t['context'],
+ url=t['context_url'],
+ ))
+ else:
+ print('t{context}'.format(context=t['context']))
+ text = textwrap.fill(t['text'], width=80, break_long_words=False,
+ break_on_hyphens=False)
+
+ # XXX: RE and .replace() dance in order to have all URLs and [h|...]
+ # XXX: entries in a single line and without a leading "t".
+ text = re.sub(r'((?:http|https)://[^\s]+)',
+ '\n [h|\g<1>|URL:\g<1>|server|port]', text)
+ text = text.replace('\n\n', '\n')
+ text = re.sub('\n([^ ])', '\nt\g<1>', text)
+ text = text.replace('\n ', '\n')
+
+ print('t{text}'.format(text=text))
+ print('t')
+
+
+if __name__ == '__main__':
+ import os
+
+ user = os.getenv('TWEET_USER')
+ cgi = os.getenv('CGI')
+ b = get_tweets(user)
+ header(b)
+ tweets = parse_tweets(b)
+ for t in tweets:
+ tweet(t, cgi)
+ more(b, cgi)
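
Since tweets.dcgi only sets up the environment before running tweets.py, the Python side can be exercised outside geomyidae, which makes the scraping easier to debug. A sketch; the user name is an arbitrary example:

#!/bin/sh
# Run tweets.py by hand, mimicking what tweets.dcgi does above.

cd twitter || exit 1

CGI="/cgi/twitter/tweets.dcgi" \
TWEET_USER="netbsd" \
PYTHONIOENCODING="utf-8" \
	/usr/pkg/bin/python2.7 tweets.py
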
diff -r 000000000000 -r 312563f7eecd youtube/channel.dcgi
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/youtube/channel.dcgi Tue Aug 07 09:56:04 2018 +0200
@@ -0,0 +1,90 @@
+#!/bin/sh
+
+channel="$2"
+
+/usr/bin/ftp -V -o - "https://www.youtube.com/feeds/videos.xml?channel_id=${channel}" |
+/usr/bin/awk '
+function print_feed_title(feed_title) {
+ print "t"
+ print "t" html_decode(feed_title) " - YouTube Channel"
+ print "t"
+}
+
+function print_entry(published, title, url, thumbnail, description, views) {
+ gsub("\\|", "\\|", title)
+ print "[h|" html_decode(title) "|" "URL:" url "|server|port]"
+ print "tPublished: " substr(published, 1, 10) # ignore hour:minute
+ print "tViews: " views
+ sub(/^/, "t", description)
+ gsub(/\n/, "\nt", description)
+ sub(/$/, "\nt", description)
+ print html_decode(description)
+}
+
+function html_decode(s)
+{
+	gsub("\\&amp;", "\\&", s)
+	gsub("\\&lt;", "<", s)
+	gsub("\\&gt;", ">", s)
+	gsub("\\&quot;", "\"", s)
+
+ return s
+}
+
+/<title>/ && !feed_title {
+ sub(/^ *<title>/, "")
+ sub(/<\/title>$/, "")
+ feed_title = $0
+ print_feed_title(feed_title)
+}
+
+/<published>/ {
+ sub(/^ *<published>/, "")
+ sub(/<\/published>$/, "")
+ published = $0
+}
+
+/<media:title>/ {
+ sub(/^ *<media:title>/, "")
+ sub(/<\/media:title>$/, "")
+ title = $0
+}
+
+/<media:content / {
+ if (match($0, /url="[^"]+"/)) {
+ # Ignore url=" and "
+ url = substr($0, RSTART + 5, RLENGTH - 6)
+ sub("/v/", "/watch?v=", url)
+ sub("\\?version=3", "", url)
+ }
+}
+
+/<media:thumbnail / {
+ if (match($0, /url="[^"]+"/)) {
+ # Ignore url=" and "
+ thumbnail = substr($0, RSTART + 5, RLENGTH - 6)
+ }
+}
+
+/<media:description>/, /<\/media:description>/ {
+ sub(/<\/media:description>$/, "")
+ sub(/^ *<media:description>/, "")
+ if (!description) {
+ description = $0
+ } else {
+ description = description "\n" $0
+ }
+}
+
+/<media:statistics / {
+ if (match($0, /views="[^"]+"/)) {
+ # Ignore views=" and "
+ views = substr($0, RSTART + 7, RLENGTH - 8)
+ }
+}
+
+published && title && url && thumbnail && description && views {
+ print_entry(published, title, url, thumbnail, description, views)
+ published = title = url = thumbnail = description = views = ""
+}
+'
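
html_decode() only undoes the handful of entities the script expects to meet in the feed. A quick standalone check of the same substitutions (the sample string is made up):

#!/bin/sh
# Exercise the html_decode() substitutions from channel.dcgi on their own.

echo 'Tom &amp; Jerry &lt;S01E01&gt; &quot;Pilot&quot;' |
awk '
{
	gsub("\\&amp;", "\\&")
	gsub("\\&lt;", "<")
	gsub("\\&gt;", ">")
	gsub("\\&quot;", "\"")
	print
}
'

which prints: Tom & Jerry <S01E01> "Pilot"
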