#!/usr/bin/env @PYTHON@
# -*- python -*-
# @configure_input@
# msrss: Merge and Scrub RSS feeds
# Copyright (C) 2013 Jack Kelly <jack@jackkelly.name>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import feedparser
import hashlib
import itertools
import logging
import os
import pickle
import PyRSS2Gen as rss2
import sys
import time
import urllib.parse as urlparse
feedparser.USER_AGENT = 'msrss/@PACKAGE_VERSION@ +https://git.sr.ht/~jack/msrss'
def setup_logging(level):
    '''Validate LEVEL and configure the root logger.'''
if not isinstance(level, int):
raise ValueError('Invalid log level: %s' % level)
logging.basicConfig(format='%(levelname)s:%(message)s', level=level)
def setup_data_dir(data_dir):
    '''Create the data directory if it doesn't exist.'''
    if os.path.isdir(data_dir): return
    if os.path.lexists(data_dir):
        logging.critical('Data dir %s exists, but is not a directory!' % data_dir)
        logging.shutdown()
        sys.exit(1)
    logging.info('Create data dir: %s' % data_dir)
    os.mkdir(data_dir)
def load_feed(url, data_dir):
    '''Load a feed from a given URL. Feeds are cached in DATA_DIR, and
    ETag and Last-Modified headers are used to avoid re-downloading
    feeds that have not changed. The cache file name is the SHA-1 hash
    of the URL.'''
cache_file = os.path.join(data_dir, hashlib.sha1(url.encode()).hexdigest())
old_feed = None
if os.path.exists(cache_file):
with open(cache_file, 'rb') as f:
old_feed = pickle.load(f)
etag = None
modified = None
if old_feed is not None:
if hasattr(old_feed, 'etag'): etag = old_feed.etag
if hasattr(old_feed, 'modified'): modified = old_feed.modified
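    # Passing etag/modified makes feedparser send If-None-Match and
    # If-Modified-Since request headers, so a server hosting an
    # unchanged feed can answer with a bodyless HTTP 304.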
    new_feed = feedparser.parse(url, etag=etag, modified=modified)
    status = getattr(new_feed, 'status', None)
    if status is None:
        # No HTTP status at all: the fetch itself failed (network error,
        # malformed response, etc.), so fall back to whatever is cached.
        logging.warning('%s could not be fetched.' % url)
        return old_feed
    elif status == 200 or status == 302:
        logging.info('%s has updated.' % url)
        with open(cache_file, 'wb') as f:
            pickle.dump(new_feed, f)
        return new_feed
    elif status == 304:
        logging.info('%s has not changed.' % url)
        return old_feed
    elif status == 301:
        logging.warning('%s has permanently moved. You should use %s instead.' %
                        (url, new_feed.href))
        return new_feed
    elif status == 410:
        logging.warning('%s was deleted. You should stop requesting it.' % url)
        return old_feed
    else:
        logging.warning('HTTP code %d' % status)
        return old_feed
def label_entries(feed, label):
'''Prefix the title of every entry with [LABEL].'''
for entry in feed.entries:
entry.title = '[' + label + '] ' + entry.title
entry.title_detail.value = entry.title
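# For example, label_entries(feed, 'Planet') retitles an entry called
# 'Hello' as '[Planet] Hello'.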
def fix_url(url):
"""
stolen shamelessly from
https://github.com/HBehrens/feedsanitizer/blob/master/misc.py
and then adapted for python3.
Copyright (c) 2011 Heiko Behrens
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
adapted from
http://stackoverflow.com/questions/804336/best-way-to-convert-a-unicode-url-to-ascii-utf-8-percent-escaped-in-python/804380#804380
"""
# parse it
parsed = urlparse.urlsplit(url)
scheme = parsed.scheme
    # divide the netloc further; rpartition leaves userpass empty when
    # there is no user:pass@ prefix, and partition keeps each ':' so it
    # is only re-inserted below when it was actually present
    userpass, at, hostport = parsed.netloc.rpartition('@')
    user, colon1, pass_ = userpass.partition(':')
    host, colon2, port = hostport.partition(':')
# encode each component
user = urlparse.quote(user.encode('utf8'))
pass_ = urlparse.quote(pass_.encode('utf8'))
    host = host.encode('idna').lower() if host else b''  # '' has no IDNA form
path = '/'.join( # could be encoded slashes!
urlparse.quote(urlparse.unquote(pce).encode('utf8'),'')
for pce in parsed.path.split('/')
)
if not path:
path = "/"
query = parsed.query
fragment = parsed.fragment
    # put it back together, restoring separators only where they existed
    netloc = ''.join((user, colon1, pass_, at, host.decode(), colon2, port))
return urlparse.urlunsplit((scheme,netloc,path,query,fragment))
def merge_items(feeds):
    '''Merge the items of every feed in FEEDS (a list of parsed feeds
    from feedparser) into one chronological list.

    Returns a list of PyRSS2Gen.RSSItem.'''
    def entry_time(entry):
        # Not every feed sets an updated date; fall back to the epoch so
        # neither the sort key nor pubDate hits a missing attribute.
        return entry.get('updated_parsed') or time.gmtime(0)
    sorted_entries = sorted(itertools.chain.from_iterable(feed.entries
                                                          for feed in feeds),
                            key=entry_time)
    return [ rss2.RSSItem(title=e.title,
                          link=fix_url(e.link),
                          description=e.get('summary'),
                          author=e.get('author', 'Unknown'),
                          comments=e.get('comments'),
                          guid=e.get('id'),
                          # Normalise Atom's ISO 8601 dates to the
                          # RFC 2822 format RSS 2.0 expects.
                          pubDate=rfc2822(entry_time(e)))
             for e in sorted_entries ]
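# The returned items are ordered oldest-to-newest: entries updated at
# 2013-01-01 and 2013-06-01 come out in that order, whichever feeds
# they came from.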
def rfc2822(tm):
    '''Format a UTC time.struct_time as an RFC 2822 date string.
    Assumes an English locale, since %a and %b are locale-dependent.'''
    return time.strftime("%a, %d %b %Y %H:%M:%S +0000", tm)
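# For example, since the epoch fell on a Thursday:
#   rfc2822(time.gmtime(0)) == 'Thu, 01 Jan 1970 00:00:00 +0000'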
class PrintVersion(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
        print('@PACKAGE_NAME@ @PACKAGE_VERSION@\n'
              'Copyright (C) 2014 Jack Kelly.\n'
              'License GPLv3+: GNU GPL version 3 or later'
              ' <http://gnu.org/licenses/gpl.html>.\n'
              'This is free software:'
              ' you are free to change and redistribute it.\n'
              'There is NO WARRANTY, to the extent permitted by law.')
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Collect RSS/Atom feeds and emit one RSS2 feed.')
    parser.add_argument('-o', '--output', default=sys.stdout,
type=argparse.FileType('w'),
help='Output file (default: stdout)')
parser.add_argument('-d', '--description',
help="Output feed's description")
parser.add_argument('-k', '--link',
help="Output feed's link")
parser.add_argument('-t', '--title',
help="Output feed's title")
parser.add_argument('--data-dir',
default=os.path.join(os.environ['HOME'], '.msrss'),
help='Directory to store downloaded feeds')
parser.add_argument('--log-level', default='WARN',
help='Logging level (default: WARN)')
parser.add_argument('-l', '--label-feed', metavar=('LABEL', 'FEED_URL'),
nargs=2, action='append', dest='labelled_feeds',
help=('Merge and clean FEED_URL, but prefix'
+ ' its entries with [LABEL]'))
parser.add_argument('--version', action=PrintVersion, nargs=0,
help='Print version information and exit')
parser.add_argument('feeds', metavar='FEED_URL', nargs='*',
help='Feed URLs to merge and clean')
args = parser.parse_args()
setup_logging(getattr(logging, args.log_level.upper(), None))
setup_data_dir(args.data_dir)
    # load_feed returns None when a feed's first-ever fetch fails (there
    # is no cached copy to fall back on), so drop those entries.
    feeds = [ feed for feed in (load_feed(url, args.data_dir)
                                for url in args.feeds)
              if feed is not None ]
    if args.labelled_feeds is not None:
        for label, url in args.labelled_feeds:
            feed = load_feed(url, args.data_dir)
            if feed is None: continue
            label_entries(feed, label)
            feeds.append(feed)
items = merge_items(feeds)
description = args.description
if description is None:
if len(feeds) == 1:
description = feeds[0].feed.get('description')
else:
urls = list(args.feeds)
if args.labelled_feeds is not None:
urls.extend( url for _, url in args.labelled_feeds )
urls.sort()
description = 'Feed generated from: [' + ', '.join(urls) + ']'
link = args.link
if link is None:
if len(feeds) == 1:
link = feeds[0].feed.get('link')
else:
link = 'http://www.example.com'
title = args.title
if title is None:
if len(feeds) == 1:
title = feeds[0].feed.get('title')
else:
            title = 'Untitled Feed'
    pubdate = rfc2822(max([time.gmtime(0)]
                          + [ feed.feed.get('updated_parsed') or time.gmtime(0)
                              for feed in feeds ]))
    out_feed = rss2.RSS2(title, link, description,
                         pubDate=pubdate,
                         lastBuildDate=rfc2822(time.gmtime()),
                         items=items)
    # Declare UTF-8 explicitly: PyRSS2Gen's default XML declaration says
    # iso-8859-1, which rarely matches what a text-mode stream writes.
    out_feed.write_xml(args.output, encoding='utf-8')
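# Example invocation (hypothetical URLs):
#   msrss -t 'Reading list' -o merged.xml \
#         -l Blog http://blog.example/atom.xml http://news.example/rss.xml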