#!/usr/bin/env python
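"""Fetch an RSS/Atom feed and fill each item in with the full article text,
extracted from the linked page with readability and cached on disk.

Runs as a CGI script (the feed url is taken from the request path) or from
the command line (assuming the file is saved as morss.py):

    morss.py [progress|cache] <url>
"""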
import sys
import os
import os.path
import time
import re
import string
import socket
import urllib2
import urlparse
from base64 import b64encode, b64decode
from cookielib import CookieJar

import lxml.html
import lxml.html.clean
import lxml.builder
import chardet
from readability import readability

import feeds

LIM_ITEM = 100  # deletes items beyond this many
MAX_ITEM = 50   # cache-only fetch beyond this many items
MAX_TIME = 7    # cache-only fetch after this many seconds spent per feed
DELAY = 10      # xml cache lifetime, in minutes
TIMEOUT = 2     # http timeout, in seconds

OPTIONS = ['progress', 'cache']

UA_RSS = 'Liferea/1.8.12 (Linux; fr_FR.utf8; http://liferea.sf.net/)'
UA_HML = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11'

PROTOCOL = ['http', 'https', 'ftp']

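# Equivalent tag names between Atom and RSS. Nothing below reads these maps
# directly; presumably they are meant for the `feeds` parser (assumption).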
ITEM_MAP = {
    'link': (('{http://www.w3.org/2005/Atom}link', 'href'), '{}link'),
    'desc': ('{http://www.w3.org/2005/Atom}summary', '{}description'),
    'description': ('{http://www.w3.org/2005/Atom}summary', '{}description'),
    'summary': ('{http://www.w3.org/2005/Atom}summary', '{}description'),
    'content': ('{http://www.w3.org/2005/Atom}content', '{http://purl.org/rss/1.0/modules/content/}encoded')
    }

RSS_MAP = {
    'desc': ('{http://www.w3.org/2005/Atom}subtitle', '{}description'),
    'description': ('{http://www.w3.org/2005/Atom}subtitle', '{}description'),
    'subtitle': ('{http://www.w3.org/2005/Atom}subtitle', '{}description'),
    'item': ('{http://www.w3.org/2005/Atom}entry', '{}item'),
    'entry': ('{http://www.w3.org/2005/Atom}entry', '{}item')
    }

if 'REQUEST_URI' in os.environ:
    # running as a CGI script: turn on http debug output and pretty tracebacks
    import httplib
    httplib.HTTPConnection.debuglevel = 1

    import cgitb
    cgitb.enable()

def log(txt):
    """Print debug output on the console (command line + DEBUG) or append it to morss.log (CGI)."""
    if 'REQUEST_URI' not in os.environ:
        if os.getenv('DEBUG', False):
            print repr(txt)
    else:
        with open('morss.log', 'a') as file:
            file.write(repr(txt).encode('utf-8') + "\n")

def cleanXML(xml):
    # drop ascii control characters (0-31) and leading whitespace
    table = string.maketrans('', '')
    return xml.translate(table, table[:32]).lstrip()

def lenHTML(txt):
    if len(txt):
        return len(lxml.html.fromstring(txt).text_content())
    else:
        return 0

def countWord(txt):
    if len(txt):
        return len(lxml.html.fromstring(txt).text_content().split())
    else:
        return 0

def makeDesc(txt, length, suffix='...'):
    return ' '.join(txt.split()[:length]) + suffix

def setContent(item, txt):
    """Store the extracted text in item.content and keep item.desc usable."""
    if not item.desc:
        if item.content:
            log('content alone')
            item.desc = item.content
            item.content = txt
        else:
            log('empty')
            item.desc = makeDesc(txt, 30)
            item.content = txt
    else:
        item.content = txt

def parseOptions(available):
    """Return (url, options) from the CGI request path or from the command line."""
    options = None
    if 'REQUEST_URI' in os.environ:
        if 'REDIRECT_URL' in os.environ:
            url = os.environ['REQUEST_URI'][1:]
        else:
            url = os.environ['REQUEST_URI'][len(os.environ['SCRIPT_NAME'])+1:]

        if urlparse.urlparse(url).scheme not in PROTOCOL:
            split = url.split('/', 1)
            if len(split) and split[0] in available:
                options = split[0]
                url = split[1]
            url = "http://" + url

    else:
        if len(sys.argv) == 3:
            if sys.argv[1] in available:
                options = sys.argv[1]
            url = sys.argv[2]
        elif len(sys.argv) == 2:
            url = sys.argv[1]
        else:
            return (None, None)

        if urlparse.urlparse(url).scheme not in PROTOCOL:
            url = "http://" + url

    return (url, options)

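# The cache is one file per feed, named after str(hash()) of the feed url,
# holding one "<key>\t<base64-encoded value>" entry per line.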
class Cache:
    """Light, error-prone caching system."""

    def __init__(self, folder, key):
        self._key = key
        self._hash = str(hash(self._key))

        self._dir = folder
        self._file = self._dir + "/" + self._hash

        self._cached = {}  # what *was* cached
        self._cache = {}  # new things to put in cache

        if os.path.isfile(self._file):
            data = open(self._file).readlines()
            for line in data:
                if "\t" in line:
                    key, bdata = line.split("\t", 1)
                    self._cached[key] = bdata

        log(self._hash)

    def __del__(self):
        self.save()

    def __contains__(self, key):
        return key in self._cached

    def get(self, key):
        if key in self._cached:
            self._cache[key] = self._cached[key]
            return b64decode(self._cached[key])
        else:
            return None

    def set(self, key, content):
        self._cache[key] = b64encode(content)

    def save(self):
        if len(self._cache) == 0:
            return

        out = []
        for (key, bdata) in self._cache.iteritems():
            out.append(str(key) + "\t" + bdata)
        txt = "\n".join(out)

        if not os.path.exists(self._dir):
            os.makedirs(self._dir)

        with open(self._file, 'w') as file:
            file.write(txt)

    def isYoungerThan(self, sec):
        if not os.path.exists(self._file):
            return False

        return time.time() - os.path.getmtime(self._file) < sec

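# Fetches a page with cookie support and a browser User-Agent, follows
# <meta http-equiv="refresh"> redirects, then guesses the encoding (http
# header, then meta charset, then chardet) and returns the decoded page
# together with its final url, or False on network errors.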
def EncDownload(url):
    try:
        cj = CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [('User-Agent', UA_HML)]
        con = opener.open(url, timeout=TIMEOUT)
        data = con.read()
    except (urllib2.HTTPError, urllib2.URLError, socket.timeout) as error:
        log(error)
        return False

    # meta-redirect
    match = re.search(r'(?i)<meta http-equiv=.refresh[^>]*?url=(http.*?)["\']', data)
    if match:
        new_url = match.groups()[0]
        log('redirect: %s' % new_url)
        return EncDownload(new_url)

    # encoding
    if con.headers.getparam('charset'):
        log('header')
        enc = con.headers.getparam('charset')
    else:
        match = re.search('charset=["\']?([0-9a-zA-Z-]+)', data)
        if match:
            log('meta.re')
            enc = match.groups()[0]
        else:
            log('chardet')
            enc = chardet.detect(data)['encoding']

    log(enc)
    return (data.decode(enc, 'replace'), con.geturl())

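# Fill() tries, in order: rewriting feedburner/feedsportal/reddit links,
# keeping the feed's own text when it is already long enough, reusing the
# page cache, and finally downloading the page and extracting its content
# with readability. It only returns False when `fast` made it skip the
# download.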
def Fill(item, cache, feedurl="/", fast=False):
    """ Returns True when it has done its best """

    if not item.link:
        log('no link')
        return True

    log(item.link)

    # feedburner
    feeds.NSMAP['feedburner'] = 'http://rssnamespace.org/feedburner/ext/1.0'
    match = item.xval('feedburner:origLink')
    if match:
        item.link = match
        log(item.link)

    # feedsportal
    match = re.search('/([0-9a-zA-Z]{20,})/story01.htm$', item.link)
    if match:
        url = match.groups()[0].split('0')
        t = {'A':'0', 'B':'.', 'C':'/', 'D':'?', 'E':'-', 'I':'_', 'L':'http://', 'S':'www.', 'N':'.com', 'O':'.co.uk'}
        item.link = "".join([(t[s[0]] if s[0] in t else "=") + s[1:] for s in url[1:]])
        log(item.link)

    # reddit
    if urlparse.urlparse(item.link).netloc == 'www.reddit.com':
        match = lxml.html.fromstring(item.desc).xpath('//a[text()="[link]"]/@href')
        if len(match):
            item.link = match[0]
            log(item.link)

    # check relative urls
    if urlparse.urlparse(item.link).netloc == '':
        item.link = urlparse.urljoin(feedurl, item.link)

    # check unwanted uppercase title
    if len(item.title) > 20 and item.title.isupper():
        item.title = item.title.title()

    # content already provided?
    count_content = countWord(item.content)
    count_desc = countWord(item.desc)

    log('content: %s words, desc: %s words' % (count_content, count_desc))

    if max(count_content, count_desc) > 500:
        log('long enough')
        return True

    if count_content > 5*count_desc > 0 and count_content > 50:
        log('content bigger enough')
        return True

    # check cache and previous errors
    if item.link in cache:
        content = cache.get(item.link)
        match = re.search(r'^error-([a-z]{2,10})$', content)
        if match:
            if cache.isYoungerThan(DELAY*60):
                log('cached error: %s' % match.groups()[0])
                return True
            else:
                log('old error')
        else:
            log('cached')
            setContent(item, cache.get(item.link))
            return True

    # super-fast mode
    if fast:
        log('skipped')
        return False

    # download
    ddl = EncDownload(item.link.encode('utf-8'))

    if ddl is False:
        log('http error')
        cache.set(item.link, 'error-http')
        return True

    data, url = ddl

    out = readability.Document(data, url=url).summary(True)
    if countWord(out) > max(count_content, count_desc) > 0:
        setContent(item, out)
        cache.set(item.link, out)
    else:
        log('not bigger enough')
        cache.set(item.link, 'error-length')
        return True

    return True

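# Gather() fetches the feed (or reuses its xml cache), then runs Fill() on
# every item, switching to cache-only mode past MAX_ITEM items or MAX_TIME
# seconds and dropping items past LIM_ITEM.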
def Gather(url, cachePath, mode='feed'):
    cache = Cache(cachePath, url)

    # fetch feed
    if cache.isYoungerThan(DELAY*60) and url in cache:
        log('xml cached')
        xml = cache.get(url)
    else:
        try:
            req = urllib2.Request(url)
            req.add_unredirected_header('User-Agent', UA_RSS)
            xml = urllib2.urlopen(req).read()
            cache.set(url, xml)
        except (urllib2.HTTPError, urllib2.URLError):
            return False

    xml = cleanXML(xml)
    rss = feeds.parse(xml)
    size = len(rss)

    # fill the items
    startTime = time.time()
    for i, item in enumerate(rss.items):
        if mode == 'progress':
            if MAX_ITEM == 0:
                print "%s/%s" % (i+1, size)
            else:
                print "%s/%s" % (i+1, min(MAX_ITEM, size))
            sys.stdout.flush()

        if i+1 > LIM_ITEM > 0:
            item.remove()
        elif time.time() - startTime > MAX_TIME >= 0 or i+1 > MAX_ITEM > 0:
            if Fill(item, cache, url, True) is False:
                item.remove()
        else:
            Fill(item, cache, url)

    log(len(rss))

    return rss.tostring(xml_declaration=True, encoding='UTF-8')

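# Entry point: in CGI mode the http headers are printed first and the cache
# lives in ./cache; on the command line it goes to ~/.cache/morss. The
# 'progress' option disables the time limit, 'cache' makes every item
# cache-only.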
if __name__ == "__main__":
    url, options = parseOptions(OPTIONS)

    if 'REQUEST_URI' in os.environ:
        print 'Status: 200'

        if options == 'progress':
            print 'Content-Type: application/octet-stream'
        else:
            print 'Content-Type: text/xml'
        print

        cache = os.getcwd() + '/cache'
        log(url)
    else:
        cache = os.path.expanduser('~') + '/.cache/morss'

    if url is None:
        print "Please provide a url."
        sys.exit(1)

    if options == 'progress':
        MAX_TIME = -1
    if options == 'cache':
        MAX_TIME = 0

    RSS = Gather(url, cache, options)

    if RSS is not False and options != 'progress':
        if 'REQUEST_URI' in os.environ or not os.getenv('DEBUG', False):
            print RSS

    if RSS is False and options != 'progress':
        print "Error fetching feed."

    log('done')