#!/usr/bin/env python

import sys
import os.path

from datetime import datetime

import re
import json
import csv

from lxml import etree
from dateutil import tz
import dateutil.parser
from copy import deepcopy

from . import crawler

from wheezy.template.engine import Engine
from wheezy.template.loader import DictLoader
from wheezy.template.ext.core import CoreExtension

# force the pure-python json encoder (the C one can ignore OrderedDict key order)
json.encoder.c_make_encoder = None

try:
    from collections import OrderedDict

except ImportError:
    # python < 2.7
    from ordereddict import OrderedDict

try:
    # python 2
    from StringIO import StringIO
    from urllib2 import urlopen
    from ConfigParser import ConfigParser

except ImportError:
    # python 3
    from io import StringIO
    from urllib.request import urlopen
    from configparser import ConfigParser

try:
    basestring

except NameError:
    # python 3
    basestring = unicode = str


def parse_rules(filename=None):
    if not filename:
        filename = os.path.join(os.path.dirname(__file__), 'feedify.ini')

    config = ConfigParser()
    config.read(filename)

    rules = dict([(x, dict(config.items(x))) for x in config.sections()])

    for section in rules.keys():
        for arg in rules[section].keys():
            if '\n' in rules[section][arg]:
                # multi-line values become lists, dropping the empty first chunk
                rules[section][arg] = rules[section][arg].split('\n')[1:]

    return rules
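
# An illustrative sketch (hypothetical section; the real rules ship in
# feedify.ini): an ini block such as
#
#     [rss]
#     title = rssfake:channel/rssfake:title
#     items = rssfake:channel/rssfake:item
#
# parses to {'rss': {'title': 'rssfake:channel/rssfake:title',
#                    'items': 'rssfake:channel/rssfake:item'}},
# and a multi-line value comes back as a list of its continuation lines.

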
class ParserBase(object):
    def __init__(self, data=None, rules=None):
        if rules is None:
            rules = parse_rules()['rss']

        if data is None:
            data = rules['base'][0]

        self.rules = rules
        self.root = self.parse(data)

        # if the rules come in several variants (`multi`), pick the variant
        # whose 'items' rule actually matches this document
        if isinstance(self.rules['items'], list):
            for (i, rule) in enumerate(self.rules['items']):
                if self.rule_search(rule) is not None:
                    key = i
                    break

            else:
                key = 0

            len_items = len(rules['items'])

            # keep only the matching variant of every list-valued rule
            for arg in self.rules.keys():
                if (isinstance(self.rules[arg], list)
                        and len(self.rules[arg]) == len_items):
                    self.rules[arg] = self.rules[arg][key]
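
    # example: with rules['items'] == ['rssfake:channel/rssfake:item',
    # 'atom:entry'] (hypothetical values), an atom document matches index 1,
    # so every other list-valued rule of the same length collapses to its
    # second entry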

    def parse(self, raw):
        pass

    def remove(self):
        # delete oneself
        pass

    def tostring(self):
        # output in its input format
        # to output in something fancier (json, csv, html), change the class type
        pass

    def tojson(self, indent=None):
        # TODO temporary
        return json.dumps(OrderedDict(self.iterdic()), indent=indent)

    def tocsv(self):
        # TODO temporary
        out = StringIO()
        c = csv.writer(out, dialect=csv.excel)

        for item in self.items:
            row = [getattr(item, x) for x in item.dic]

            if sys.version_info[0] < 3:
                row = [x.encode('utf-8') if isinstance(x, unicode) else x for x in row]

            c.writerow(row)

        out.seek(0)
        return out.read()

    def tohtml(self):
        # TODO temporary
        path = os.path.join(os.path.dirname(__file__), 'reader.html.template')
        loader = DictLoader({'reader': open(path).read()})
        engine = Engine(loader=loader, extensions=[CoreExtension()])
        template = engine.get_template('reader')
        return template.render({'feed': self}).encode('utf-8')

    def iterdic(self):
        for element in self.dic:
            value = getattr(self, element)

            if element == 'items':
                value = [OrderedDict(x.iterdic()) for x in value]

            elif isinstance(value, datetime):
                value = value.isoformat()

            yield element, value

    def rule_search(self, rule):
        # xpath, return the first one only
        try:
            return self.rule_search_all(rule)[0]

        except IndexError:
            return None

    def rule_search_all(self, rule):
        # xpath, return all (useful to find feed items)
        pass

    def rule_search_last(self, rule):
        # xpath, return the last one only
        try:
            return self.rule_search_all(rule)[-1]

        except IndexError:
            return None

    def rule_create(self, rule):
        # create a node based on the rule
        # (duplicate/copy an existing node (or a template), or create from scratch, if possible)
        # --> might want to create node_duplicate helper fns
        pass

    def rule_remove(self, rule):
        # remove node from its parent
        pass

    def rule_set(self, rule, value):
        # value is always a str?
        pass

    def rule_str(self, rule):
        # get the (pure) text inside the matched node
        pass

    def bool_prs(self, x):
        # parse
        pass

    def bool_fmt(self, x):
        # format
        pass

    def time_prs(self, x):
        # parse
        pass

    def time_fmt(self, x):
        # format
        pass

    def get_raw(self, rule_name):
        # get the raw output, for self.get_raw('items')
        pass

    def get_str(self, rule_name):
        # simple function to get nice text from the rule name
        # for use in @property, ie. self.get_str('title')
        pass

    def set_str(self, rule_name, value):
        pass

    def rmv(self, rule_name):
        # easy deleter
        pass


class ParserXML(ParserBase):
    NSMAP = {'atom': 'http://www.w3.org/2005/Atom',
             'atom03': 'http://purl.org/atom/ns#',
             'media': 'http://search.yahoo.com/mrss/',
             'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
             'slash': 'http://purl.org/rss/1.0/modules/slash/',
             'dc': 'http://purl.org/dc/elements/1.1/',
             'content': 'http://purl.org/rss/1.0/modules/content/',
             'rssfake': 'http://purl.org/rss/1.0/'}

    def parse(self, raw):
        parser = etree.XMLParser(recover=True)
        return etree.fromstring(raw, parser)

    def remove(self):
        return self.root.getparent().remove(self.root)

    def tostring(self, **k):
        return etree.tostring(self.root, **k)

    def _rule_parse(self, rule):
        test = re.search(r'^(.*)/@([a-z]+)$', rule)  # to match //div/a/@href
        return test.groups() if test else (rule, None)
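
    # e.g. _rule_parse('//div/a/@href') == ('//div/a', 'href'); a rule with
    # no attribute part comes back as (rule, None)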

    def _resolve_ns(self, rule):
        match = re.search(r'^([^:]+):([^:]+)$', rule)  # to match fakerss:content
        if match:
            match = match.groups()
            if match[0] in self.NSMAP:
                return "{%s}%s" % (self.NSMAP[match[0]], match[1].lower())

        return rule
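
    # e.g. _resolve_ns('rssfake:channel') == '{http://purl.org/rss/1.0/}channel',
    # the clark notation lxml expects; unknown prefixes pass through unchanged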

    @staticmethod
    def _inner_html(xml):
        # serialise the children as text (encoding='unicode' so we join str, not bytes)
        return (xml.text or '') + ''.join([etree.tostring(child, encoding='unicode') for child in xml])

    @staticmethod
    def _clean_node(xml):
        # copy the child list first: removing while iterating skips siblings
        for child in list(xml):
            xml.remove(child)

    def rule_search_all(self, rule):
        try:
            return self.root.xpath(rule, namespaces=self.NSMAP)

        except etree.XPathEvalError:
            return []
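
    # rules are plain xpath (with the NSMAP prefixes available), e.g.
    # rule_search_all('rssfake:channel/rssfake:item') returns every <item>
    # node of an rss 1.0 feed, or [] when nothing matches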

    def rule_create(self, rule):
        # duplicate, copy from template or create from scratch
        rule, key = self._rule_parse(rule)

        # try recreating based on the rule (for really basic rules, ie. plain RSS)
        if re.search(r'^[a-zA-Z0-9/:]+$', rule):
            chain = rule.strip('/').split('/')
            current = self.root

            if rule[0] == '/':
                chain = chain[1:]

            for (i, node) in enumerate(chain):
                test = current.find(self._resolve_ns(node))

                if test is not None and i < len(chain) - 1:
                    # yay, go on
                    current = test

                else:
                    # oops, need to create it
                    element = etree.Element(self._resolve_ns(node))
                    current.append(element)
                    current = element

            return current

        # try duplicating from an existing node (works well with fucked up structures)
        match = self.rule_search_last(rule)
        if match is not None:
            element = deepcopy(match)
            match.getparent().append(element)
            return element

        # try duplicating from template
        # FIXME
        # >>> self.xml.getroottree().getpath(ff.find('a'))

        return None

    def rule_remove(self, rule):
        rule, key = self._rule_parse(rule)

        match = self.rule_search(rule)

        if match is None:
            return

        elif key is not None:
            del match.attrib[key]

        else:
            match.getparent().remove(match)

    def rule_set(self, rule, value):
        rule, key = self._rule_parse(rule)

        # a missing node raises AttributeError here, which set_str() relies on
        match = self.rule_search(rule)

        if key is not None:
            match.attrib[key] = value

        else:
            if match is not None and len(match):
                # atom stuff: drop the child nodes before setting plain text
                self._clean_node(match)

                if match.attrib.get('type', '') == 'xhtml':
                    match.attrib['type'] = 'html'

            match.text = value

    def rule_str(self, rule):
        match = self.rule_search(rule)

        if isinstance(match, etree._Element):
            if len(match):
                # atom stuff
                return self._inner_html(match)

            else:
                return match.text or ""

        else:
            return match or ""

    def bool_prs(self, x):
        return (x or '').lower() != 'false'

    def bool_fmt(self, x):
        return 'true' if x else 'false'

    def time_prs(self, x):
        try:
            return parse_time(x)

        except ValueError:
            return None

    def time_fmt(self, x):
        try:
            time = parse_time(x)
            return time.strftime(self.rules['timeformat'])

        except ValueError:
            pass

    def get_raw(self, rule_name):
        return self.rule_search_all(self.rules[rule_name])

    def get_str(self, rule_name):
        return self.rule_str(self.rules[rule_name])

    def set_str(self, rule_name, value):
        try:
            return self.rule_set(self.rules[rule_name], value)

        except AttributeError:
            # does not exist, have to create it
            self.rule_create(self.rules[rule_name])
            return self.rule_set(self.rules[rule_name], value)

    def rmv(self, rule_name):
        self.rule_remove(self.rules[rule_name])


def parse_time(value):
    if isinstance(value, basestring):
        if re.match(r'^[0-9]+$', value):
            return datetime.fromtimestamp(int(value), tz.tzutc())

        else:
            # tzinfos expects a dict or a callable, not the tz.tzutc class
            # itself; map any named timezone to utc, as apparently intended
            return dateutil.parser.parse(value, tzinfos=lambda name, offset: tz.tzutc())

    elif isinstance(value, int):
        return datetime.fromtimestamp(value, tz.tzutc())

    elif isinstance(value, datetime):
        return value

    else:
        return False
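
# for illustration (made-up values):
#
#     parse_time('1530000000')            -> utc datetime for that unix epoch
#     parse_time(1530000000)              -> same, from an int
#     parse_time('2018-06-26T09:20:00Z')  -> parsed by dateutil
#     parse_time(None)                    -> False

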
class Uniq(object):
    _map = {}
    _id = None

    def __new__(cls, *args, **kwargs):
        # check if a wrapper was already created for it
        # if so, reuse it
        # if not, create a new one
        # note that the item itself (the tree node) is created beforehand

        tmp_id = cls._gen_id(*args, **kwargs)
        if tmp_id in cls._map:
            return cls._map[tmp_id]

        else:
            # object.__new__ takes no extra args on python 3
            obj = object.__new__(cls)
            cls._map[tmp_id] = obj
            return obj
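
    # identity-map semantics: wrapping the same underlying node twice returns
    # the very same python object, e.g.
    # ItemXML(node, rules) is ItemXML(node, rules)  ->  True

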
class Feed(object):
    itemsClass = 'Item'
    dic = ('title', 'desc', 'items')

    def wrap_items(self, items):
        itemsClass = globals()[self.itemsClass]
        return [itemsClass(x, self.rules) for x in items]

    title = property(
        lambda f: f.get_str('title'),
        lambda f, x: f.set_str('title', x),
        lambda f: f.rmv('title'))

    description = desc = property(
        lambda f: f.get_str('desc'),
        lambda f, x: f.set_str('desc', x),
        lambda f: f.rmv('desc'))

    items = property(
        lambda f: f)  # the feed itself behaves as its list of items

    def append(self, new=None):
        self.rule_create(self.rules['items'])
        item = self.items[-1]

        if new is None:
            return item

        for attr in globals()[self.itemsClass].dic:
            if hasattr(new, attr):
                # new is another item object
                setattr(item, attr, getattr(new, attr))

            elif attr in new:
                # new is a dict
                setattr(item, attr, new[attr])

    def __getitem__(self, key):
        return self.wrap_items(self.get_raw('items'))[key]

    def __delitem__(self, key):
        self[key].rmv()

    def __len__(self):
        return len(self.get_raw('items'))
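
    # a Feed behaves like a list of its items (sketch, assuming `feed` is a
    # parsed FeedXML):
    #
    #     len(feed)                            # number of items
    #     feed[0].title                        # first item's title
    #     del feed[0]                          # drop it from the tree
    #     feed.append({'title': 'new entry'})

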
class Item(Uniq):
    dic = ('title', 'link', 'desc', 'content', 'id', 'is_permalink', 'time', 'updated')

    def __init__(self, xml=None, rules=None):
        self._id = self._gen_id(xml)
        self.root = xml
        self.rules = rules

    @staticmethod
    def _gen_id(xml=None, *args, **kwargs):
        return id(xml)

    title = property(
        lambda f: f.get_str('item_title'),
        lambda f, x: f.set_str('item_title', x),
        lambda f: f.rmv('item_title'))

    link = property(
        lambda f: f.get_str('item_link'),
        lambda f, x: f.set_str('item_link', x),
        lambda f: f.rmv('item_link'))

    description = desc = property(
        lambda f: f.get_str('item_desc'),
        lambda f, x: f.set_str('item_desc', x),
        lambda f: f.rmv('item_desc'))

    content = property(
        lambda f: f.get_str('item_content'),
        lambda f, x: f.set_str('item_content', x),
        lambda f: f.rmv('item_content'))

    id = property(
        lambda f: f.get_str('item_id'),
        lambda f, x: f.set_str('item_id', x),
        lambda f: f.rmv('item_id'))

    is_permalink = property(
        lambda f: f.get_str('item_is_permalink'),
        lambda f, x: f.set_str('item_is_permalink', x))#,
        #lambda f: f.rmv('item_is_permalink') )

    # setters format the value before storing it (set_str needs a str)
    time = property(
        lambda f: f.time_fmt(f.get_str('item_time')),
        lambda f, x: f.set_str('item_time', f.time_fmt(x)),
        lambda f: f.rmv('item_time'))

    updated = property(
        lambda f: f.time_fmt(f.get_str('item_updated')),
        lambda f, x: f.set_str('item_updated', f.time_fmt(x)),
        lambda f: f.rmv('item_updated'))


class FeedXML(Feed, ParserXML):
    itemsClass = 'ItemXML'

    def tostring(self, **k):
        # serialize the whole tree, not just the feed node
        return etree.tostring(self.root.getroottree(), **k)


class ItemXML(Item, ParserXML):
    pass
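
# a minimal usage sketch (not executed; assumes the bundled feedify.ini
# provides the default 'rss' ruleset with a multi-line 'base' template):
#
#     feed = FeedXML()    # built from the template document
#     feed.title = 'Example feed'
#     feed.append({'title': 'First item', 'link': 'http://example.com/1'})
#     print(feed.tostring(encoding='unicode'))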