namespace
stringlengths 12
102
| type
stringclasses 2
values | project_path
stringclasses 115
values | completion_path
stringlengths 20
110
| signature_position
listlengths 2
2
| body_position
listlengths 2
2
| requirement
dict | tests
listlengths 1
5
| indent
int64 2
12
| anchor_name
stringlengths 18
115
| anchor_text
dict | import_statements
listlengths 0
140
| target_function_prompt
stringlengths 15
74.4k
| prompt
stringlengths 308
842k
| target_function_name
stringlengths 2
63
| target_source
stringlengths 12
89
| example
stringlengths 0
23.4k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
benedict.utils.type_util.is_json_serializable
|
function
|
Text-Processing/python-benedict
|
Text-Processing/python-benedict/benedict/utils/type_util.py
|
[
53,
53
] |
[
54,
55
] |
{
"Arguments": ":param val: Any. The input value to be checked for JSON serializability.\n:return: Bool. True if the input value is JSON serializable, False otherwise.",
"Functionality": "Check if the input value is JSON serializable. It checks if the input value is of the JSON serializable types."
}
|
[
"tests/utils/test_type_util.py::type_util_test_case::test_is_json_serializable"
] | 4
|
is_json_serializable@python-benedict/benedict/utils/type_util.py
|
{
"code": "def is_json_serializable(val):\n json_types = (type(None), bool, dict, float, int, list, str, tuple)\n return isinstance(val, json_types)",
"description": "DOCSTRING",
"file_path": "python-benedict/benedict/utils/type_util.py",
"incoming_calls": [],
"name": "is_json_serializable",
"signature": "def is_json_serializable(val):\n"
}
|
[
"from datetime import datetime",
"import re",
"from decimal import Decimal",
"import pathlib"
] |
def is_json_serializable(val):
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE python-benedict/benedict/utils/type_util.py
from datetime import datetime
import re
from decimal import Decimal
import pathlib
regex = re.compile("").__class__
def is_bool(val):
return isinstance(val, bool)
def is_collection(val):
return isinstance(val, (dict, list, set, tuple))
def is_datetime(val):
return isinstance(val, datetime)
def is_decimal(val):
return isinstance(val, Decimal)
def is_dict(val):
return isinstance(val, dict)
def is_dict_or_list(val):
return isinstance(val, (dict, list))
def is_dict_or_list_or_tuple(val):
return isinstance(val, (dict, list, tuple))
def is_float(val):
return isinstance(val, float)
def is_function(val):
return callable(val)
def is_integer(val):
return isinstance(val, int)
def is_list(val):
return isinstance(val, list)
def is_list_or_tuple(val):
return isinstance(val, (list, tuple))
def is_none(val):
return val is None
def is_path(val):
return isinstance(val, pathlib.Path)
def is_regex(val):
return isinstance(val, regex)
def is_set(val):
return isinstance(val, set)
def is_string(val):
return isinstance(val, str)
def is_tuple(val):
return isinstance(val, tuple)
def is_uuid(val):
return is_string(val) and uuid_re.match(val)
Based on the information above, please complete the function in the current file python-benedict/benedict/utils/type_util.py:
def is_json_serializable(val):
|
is_json_serializable
|
python-benedict/benedict/utils/type_util.py
| |
feedparser.urls.convert_to_idn
|
function
|
Text-Processing/feedparser
|
Text-Processing/feedparser/feedparser/urls.py
|
[
61,
61
] |
[
66,
83
] |
{
"Arguments": ":param url: String. The URL to be converted to IDN notation.\n:return: String. The URL in IDN notation.",
"Functionality": "Convert a URL to IDN notation. It checks if the host can be encoded in ASCII. If not, it converts the host to IDN form."
}
|
[
"tests/runtests.py::TestConvertToIdn::test_port",
"tests/runtests.py::TestConvertToIdn::test_idn",
"tests/runtests.py::TestConvertToIdn::test_control"
] | 4
|
convert_to_idn@feedparser/feedparser/urls.py
|
{
"code": "def convert_to_idn(url):\n \"\"\"Convert a URL to IDN notation\"\"\"\n # this function should only be called with a unicode string\n # strategy: if the host cannot be encoded in ascii, then\n # it'll be necessary to encode it in idn form\n parts = list(urllib.parse.urlsplit(url))\n try:\n parts[1].encode('ascii')\n except UnicodeEncodeError:\n # the url needs to be converted to idn notation\n host = parts[1].rsplit(':', 1)\n newhost = []\n port = ''\n if len(host) == 2:\n port = host.pop()\n for h in host[0].split('.'):\n newhost.append(h.encode('idna').decode('utf-8'))\n parts[1] = '.'.join(newhost)\n if port:\n parts[1] += ':' + port\n return urllib.parse.urlunsplit(parts)\n else:\n return url",
"description": "Convert a URL to IDN notation",
"file_path": "feedparser/feedparser/urls.py",
"incoming_calls": [
"get@feedparser/feedparser/http.py"
],
"name": "convert_to_idn",
"signature": "def convert_to_idn(url):\n"
}
|
[
"from .html import _BaseHTMLProcessor",
"import re",
"import urllib.parse"
] |
def convert_to_idn(url):
"""Convert a URL to IDN notation"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE feedparser/feedparser/urls.py
from .html import _BaseHTMLProcessor
import re
import urllib.parse
class RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = {
('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src'),
}
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolve_uri(self, uri):
return make_safe_absolute_uri(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolve_uri(value) or value) for key, value in attrs]
super(RelativeURIResolver, self).unknown_starttag(tag, attrs)
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
uri = urllib.parse.urljoin(base, uri)
except ValueError:
uri = ''
return uri
def make_safe_absolute_uri(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or '')
if not base:
return rel or ''
if not rel:
try:
scheme = urllib.parse.urlparse(base)[0]
except ValueError:
return ''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return ''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return ''
return uri
def resolve_relative_uris(html_source, base_uri, encoding, type_):
p = RelativeURIResolver(base_uri, encoding, type_)
p.feed(html_source)
return p.output()
Based on the information above, please complete the function in the current file feedparser/feedparser/urls.py:
def convert_to_idn(url):
"""Convert a URL to IDN notation"""
|
convert_to_idn
|
feedparser/feedparser/urls.py
|
def get(url, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, result=None):
if handlers is None:
handlers = []
elif not isinstance(handlers, list):
handlers = [handlers]
if request_headers is None:
request_headers = {}
# Deal with the feed URI scheme
if url.startswith('feed:http'):
url = url[5:]
elif url.startswith('feed:'):
url = 'http:' + url[5:]
if not agent:
from . import USER_AGENT
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if not url.startswith('ftp:'):
url_pieces = urllib.parse.urlparse(url)
if url_pieces.username:
new_pieces = list(url_pieces)
new_pieces[1] = url_pieces.hostname
if url_pieces.port:
new_pieces[1] = f'{url_pieces.hostname}:{url_pieces.port}'
url = urllib.parse.urlunparse(new_pieces)
auth = base64.standard_b64encode(f'{url_pieces.username}:{url_pieces.password}'.encode()).decode()
# iri support
if not isinstance(url, bytes):
url = convert_to_idn(url)
# Prevent UnicodeEncodeErrors caused by Unicode characters in the path.
bits = []
for c in url:
try:
c.encode('ascii')
except UnicodeEncodeError:
bits.append(urllib.parse.quote(c))
else:
bits.append(c)
url = ''.join(bits)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url, agent, ACCEPT_HEADER, etag, modified, referrer, auth, request_headers)
opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
f = opener.open(request)
data = f.read()
f.close()
# lowercase all of the HTTP headers for comparisons per RFC 2616
result['headers'] = {k.lower(): v for k, v in f.headers.items()}
# if feed is gzip-compressed, decompress it
if data and 'gzip' in result['headers'].get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
except (EOFError, IOError, struct.error) as e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = True
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif data and 'deflate' in result['headers'].get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error as e:
result['bozo'] = True
result['bozo_exception'] = e
# save HTTP headers
if 'etag' in result['headers']:
etag = result['headers'].get('etag', '')
if isinstance(etag, bytes):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in result['headers']:
modified = result['headers'].get('last-modified', '')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if isinstance(f.url, bytes):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = getattr(f, 'status', None) or 200
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return data
|
mistune.toc.add_toc_hook
|
function
|
Text-Processing/mistune
|
Text-Processing/mistune/src/mistune/toc.py
|
[
4,
4
] |
[
23,
44
] |
{
"Arguments": ":param md: Markdown instance. The instance of the Markdown class.\n:param min_level: Integer. The minimum heading level to include in the TOC.\n:param max_level: Integer. The maximum heading level to include in the TOC.\n:param heading_id: Function. A function to generate heading_id.\n:return: No return values.",
"Functionality": "This function adds a hook to save table of contents (TOC) items into the state.env. It is usually helpful for doc generator."
}
|
[
"tests/test_hooks.py::TestTocHook::test_customize_heading_id_func"
] | 4
|
add_toc_hook@mistune/src/mistune/toc.py
|
{
"code": "def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):\n \"\"\"Add a hook to save toc items into ``state.env``. This is\n usually helpful for doc generator::\n\n import mistune\n from mistune.toc import add_toc_hook, render_toc_ul\n\n md = mistune.create_markdown(...)\n add_toc_hook(md)\n\n html, state = md.parse(text)\n toc_items = state.env['toc_items']\n toc_html = render_toc_ul(toc_items)\n\n :param md: Markdown instance\n :param min_level: min heading level\n :param max_level: max heading level\n :param heading_id: a function to generate heading_id\n \"\"\"\n if heading_id is None:\n def heading_id(token, index):\n return 'toc_' + str(index + 1)\n\n def toc_hook(md, state):\n headings = []\n\n for tok in state.tokens:\n if tok['type'] == 'heading':\n level = tok['attrs']['level']\n if min_level <= level <= max_level:\n headings.append(tok)\n\n toc_items = []\n for i, tok in enumerate(headings):\n tok['attrs']['id'] = heading_id(tok, i)\n toc_items.append(normalize_toc_item(md, tok))\n\n # save items into state\n state.env['toc_items'] = toc_items\n\n md.before_render_hooks.append(toc_hook)",
"description": "Add a hook to save toc items into ``state.env``. This is\nusually helpful for doc generator::\n\n import mistune\n from mistune.toc import add_toc_hook, render_toc_ul\n\n md = mistune.create_markdown(...)\n add_toc_hook(md)\n\n html, state = md.parse(text)\n toc_items = state.env['toc_items']\n toc_html = render_toc_ul(toc_items)\n\n:param md: Markdown instance\n:param min_level: min heading level\n:param max_level: max heading level\n:param heading_id: a function to generate heading_id",
"file_path": "mistune/src/mistune/toc.py",
"incoming_calls": [
"TestTocHook.parse@mistune/tests/test_hooks.py",
"TestTocHook.test_customize_heading_id_func@mistune/tests/test_hooks.py"
],
"name": "add_toc_hook",
"signature": "def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):\n"
}
|
[
"from .util import striptags"
] |
def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
"""Add a hook to save toc items into ``state.env``. This is
usually helpful for doc generator::
import mistune
from mistune.toc import add_toc_hook, render_toc_ul
md = mistune.create_markdown(...)
add_toc_hook(md)
html, state = md.parse(text)
toc_items = state.env['toc_items']
toc_html = render_toc_ul(toc_items)
:param md: Markdown instance
:param min_level: min heading level
:param max_level: max heading level
:param heading_id: a function to generate heading_id
"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE mistune/src/mistune/util.py
#CURRENT FILE mistune/src/mistune/toc.py
from .util import striptags
def normalize_toc_item(md, token):
text = token['text']
tokens = md.inline(text, {})
html = md.renderer(tokens, {})
text = striptags(html)
attrs = token['attrs']
return attrs['level'], attrs['id'], text
def toc_hook(md, state):
headings = []
for tok in state.tokens:
if tok['type'] == 'heading':
level = tok['attrs']['level']
if min_level <= level <= max_level:
headings.append(tok)
toc_items = []
for i, tok in enumerate(headings):
tok['attrs']['id'] = heading_id(tok, i)
toc_items.append(normalize_toc_item(md, tok))
# save items into state
state.env['toc_items'] = toc_items
def render_toc_ul(toc):
"""Render a <ul> table of content HTML. The param "toc" should
be formatted into this structure::
[
(level, id, text),
]
For example::
[
(1, 'toc-intro', 'Introduction'),
(2, 'toc-install', 'Install'),
(2, 'toc-upgrade', 'Upgrade'),
(1, 'toc-license', 'License'),
]
"""
if not toc:
return ''
s = '<ul>\n'
levels = []
for level, k, text in toc:
item = '<a href="#{}">{}</a>'.format(k, text)
if not levels:
s += '<li>' + item
levels.append(level)
elif level == levels[-1]:
s += '</li>\n<li>' + item
elif level > levels[-1]:
s += '\n<ul>\n<li>' + item
levels.append(level)
else:
levels.pop()
while levels:
last_level = levels.pop()
if level == last_level:
s += '</li>\n</ul>\n</li>\n<li>' + item
levels.append(level)
break
elif level > last_level:
s += '</li>\n<li>' + item
levels.append(last_level)
levels.append(level)
break
else:
s += '</li>\n</ul>\n'
else:
levels.append(level)
s += '</li>\n<li>' + item
while len(levels) > 1:
s += '</li>\n</ul>\n'
levels.pop()
return s + '</li>\n</ul>\n'
def striptags(s: str):
return _striptags_re.sub('', s)
Based on the information above, please complete the function in the current file mistune/src/mistune/toc.py:
def add_toc_hook(md, min_level=1, max_level=3, heading_id=None):
"""Add a hook to save toc items into ``state.env``. This is
usually helpful for doc generator::
import mistune
from mistune.toc import add_toc_hook, render_toc_ul
md = mistune.create_markdown(...)
add_toc_hook(md)
html, state = md.parse(text)
toc_items = state.env['toc_items']
toc_html = render_toc_ul(toc_items)
:param md: Markdown instance
:param min_level: min heading level
:param max_level: max heading level
:param heading_id: a function to generate heading_id
"""
|
add_toc_hook
|
mistune/src/mistune/toc.py
|
def toc_hook(md, state):
headings = []
for tok in state.tokens:
if tok['type'] == 'heading':
level = tok['attrs']['level']
if min_level <= level <= max_level:
headings.append(tok)
toc_items = []
for i, tok in enumerate(headings):
tok['attrs']['id'] = heading_id(tok, i)
toc_items.append(normalize_toc_item(md, tok))
# save items into state
state.env['toc_items'] = toc_items
|
mistune.plugins.table.table_in_quote
|
function
|
Text-Processing/mistune
|
Text-Processing/mistune/src/mistune/plugins/table.py
|
[
170,
170
] |
[
172,
173
] |
{
"Arguments": ":param md: Markdown. The Markdown instance.\n:return: No return values.",
"Functionality": "This function enables the table plugin in block quotes by inserting rules for table and nptable before the paragraph in the block quote rules."
}
|
[
"tests/test_plugins.py::TestExtraPlugins::test_table_in_quote"
] | 4
|
table_in_quote@mistune/src/mistune/plugins/table.py
|
{
"code": "def table_in_quote(md):\n \"\"\"Enable table plugin in block quotes.\"\"\"\n md.block.insert_rule(md.block.block_quote_rules, 'table', before='paragraph')\n md.block.insert_rule(md.block.block_quote_rules, 'nptable', before='paragraph')",
"description": "Enable table plugin in block quotes.",
"file_path": "mistune/src/mistune/plugins/table.py",
"incoming_calls": [],
"name": "table_in_quote",
"signature": "def table_in_quote(md):\n"
}
|
[
"from ..helpers import PREVENT_BACKSLASH",
"import re"
] |
def table_in_quote(md):
"""Enable table plugin in block quotes."""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE mistune/src/mistune/helpers.py
PREVENT_BACKSLASH = "(?<!\\)(?:\\\\)*"
#CURRENT FILE mistune/src/mistune/plugins/table.py
from ..helpers import PREVENT_BACKSLASH
import re
CELL_SPLIT = re.compile(r' *' + PREVENT_BACKSLASH + r'\| *')
NP_TABLE_PATTERN = "^ {0,3}(?P<nptable_head>\S.*\|.*)\n {0,3}(?P<nptable_align>[-:]+ *\|[-| :]*)\n(?P<nptable_body>(?:.*\|.*(?:\n|$))*)\n*"
TABLE_CELL = re.compile(r'^ {0,3}\|(.+)\|[ \t]*$')
TABLE_PATTERN = "^ {0,3}\|(?P<table_head>.+)\|[ \t]*\n {0,3}\|(?P<table_align> *[-:]+[-| :]*)\|[ \t]*\n(?P<table_body>(?: {0,3}\|.*\|[ \t]*(?:\n|$))*)\n*"
def _process_row(text, aligns):
cells = CELL_SPLIT.split(text)
if len(cells) != len(aligns):
return None
children = [
{
'type': 'table_cell',
'text': text.strip(),
'attrs': {'align': aligns[i], 'head': False}
}
for i, text in enumerate(cells)
]
return {'type': 'table_row', 'children': children}
def _process_thead(header, align):
headers = CELL_SPLIT.split(header)
aligns = CELL_SPLIT.split(align)
if len(headers) != len(aligns):
return None, None
for i, v in enumerate(aligns):
if ALIGN_CENTER.match(v):
aligns[i] = 'center'
elif ALIGN_LEFT.match(v):
aligns[i] = 'left'
elif ALIGN_RIGHT.match(v):
aligns[i] = 'right'
else:
aligns[i] = None
children = [
{
'type': 'table_cell',
'text': text.strip(),
'attrs': {'align': aligns[i], 'head': True}
}
for i, text in enumerate(headers)
]
thead = {'type': 'table_head', 'children': children}
return thead, aligns
def parse_nptable(block, m, state):
header = m.group('nptable_head')
align = m.group('nptable_align')
thead, aligns = _process_thead(header, align)
if not thead:
return
rows = []
body = m.group('nptable_body')
for text in body.splitlines():
row = _process_row(text, aligns)
if not row:
return
rows.append(row)
children = [thead, {'type': 'table_body', 'children': rows}]
state.append_token({'type': 'table', 'children': children})
return m.end()
def parse_table(block, m, state):
pos = m.end()
header = m.group('table_head')
align = m.group('table_align')
thead, aligns = _process_thead(header, align)
if not thead:
return
rows = []
body = m.group('table_body')
for text in body.splitlines():
m = TABLE_CELL.match(text)
if not m: # pragma: no cover
return
row = _process_row(m.group(1), aligns)
if not row:
return
rows.append(row)
children = [thead, {'type': 'table_body', 'children': rows}]
state.append_token({'type': 'table', 'children': children})
return pos
def render_table(renderer, text):
return '<table>\n' + text + '</table>\n'
def render_table_body(renderer, text):
return '<tbody>\n' + text + '</tbody>\n'
def render_table_cell(renderer, text, align=None, head=False):
if head:
tag = 'th'
else:
tag = 'td'
html = ' <' + tag
if align:
html += ' style="text-align:' + align + '"'
return html + '>' + text + '</' + tag + '>\n'
def render_table_head(renderer, text):
return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n'
def render_table_row(renderer, text):
return '<tr>\n' + text + '</tr>\n'
def table(md):
"""A mistune plugin to support table, spec defined at
https://michelf.ca/projects/php-markdown/extra/#table
Here is an example:
.. code-block:: text
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
:param md: Markdown instance
"""
md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph')
md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph')
if md.renderer and md.renderer.NAME == 'html':
md.renderer.register('table', render_table)
md.renderer.register('table_head', render_table_head)
md.renderer.register('table_body', render_table_body)
md.renderer.register('table_row', render_table_row)
md.renderer.register('table_cell', render_table_cell)
Based on the information above, please complete the function in the current file mistune/src/mistune/plugins/table.py:
def table_in_quote(md):
"""Enable table plugin in block quotes."""
|
table_in_quote
|
mistune/src/mistune/plugins/table.py
| |
mistune.plugins.table.table_in_list
|
function
|
Text-Processing/mistune
|
Text-Processing/mistune/src/mistune/plugins/table.py
|
[
176,
176
] |
[
178,
179
] |
{
"Arguments": ":param md: Markdown. The Markdown instance to enable the table plugin in the list.\n:return: No return values.",
"Functionality": "This function enables the table plugin in the list. It inserts the table and nptable rules before the paragraph rule in the list."
}
|
[
"tests/test_plugins.py::TestExtraPlugins::test_table_in_list"
] | 4
|
table_in_list@mistune/src/mistune/plugins/table.py
|
{
"code": "def table_in_list(md):\n \"\"\"Enable table plugin in list.\"\"\"\n md.block.insert_rule(md.block.list_rules, 'table', before='paragraph')\n md.block.insert_rule(md.block.list_rules, 'nptable', before='paragraph')",
"description": "Enable table plugin in list.",
"file_path": "mistune/src/mistune/plugins/table.py",
"incoming_calls": [],
"name": "table_in_list",
"signature": "def table_in_list(md):\n"
}
|
[
"from ..helpers import PREVENT_BACKSLASH",
"import re"
] |
def table_in_list(md):
"""Enable table plugin in list."""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE mistune/src/mistune/plugins/table.py
from ..helpers import PREVENT_BACKSLASH
import re
CELL_SPLIT = re.compile(r' *' + PREVENT_BACKSLASH + r'\| *')
NP_TABLE_PATTERN = "^ {0,3}(?P<nptable_head>\S.*\|.*)\n {0,3}(?P<nptable_align>[-:]+ *\|[-| :]*)\n(?P<nptable_body>(?:.*\|.*(?:\n|$))*)\n*"
TABLE_CELL = re.compile(r'^ {0,3}\|(.+)\|[ \t]*$')
TABLE_PATTERN = "^ {0,3}\|(?P<table_head>.+)\|[ \t]*\n {0,3}\|(?P<table_align> *[-:]+[-| :]*)\|[ \t]*\n(?P<table_body>(?: {0,3}\|.*\|[ \t]*(?:\n|$))*)\n*"
def _process_row(text, aligns):
cells = CELL_SPLIT.split(text)
if len(cells) != len(aligns):
return None
children = [
{
'type': 'table_cell',
'text': text.strip(),
'attrs': {'align': aligns[i], 'head': False}
}
for i, text in enumerate(cells)
]
return {'type': 'table_row', 'children': children}
def _process_thead(header, align):
headers = CELL_SPLIT.split(header)
aligns = CELL_SPLIT.split(align)
if len(headers) != len(aligns):
return None, None
for i, v in enumerate(aligns):
if ALIGN_CENTER.match(v):
aligns[i] = 'center'
elif ALIGN_LEFT.match(v):
aligns[i] = 'left'
elif ALIGN_RIGHT.match(v):
aligns[i] = 'right'
else:
aligns[i] = None
children = [
{
'type': 'table_cell',
'text': text.strip(),
'attrs': {'align': aligns[i], 'head': True}
}
for i, text in enumerate(headers)
]
thead = {'type': 'table_head', 'children': children}
return thead, aligns
def parse_nptable(block, m, state):
header = m.group('nptable_head')
align = m.group('nptable_align')
thead, aligns = _process_thead(header, align)
if not thead:
return
rows = []
body = m.group('nptable_body')
for text in body.splitlines():
row = _process_row(text, aligns)
if not row:
return
rows.append(row)
children = [thead, {'type': 'table_body', 'children': rows}]
state.append_token({'type': 'table', 'children': children})
return m.end()
def parse_table(block, m, state):
pos = m.end()
header = m.group('table_head')
align = m.group('table_align')
thead, aligns = _process_thead(header, align)
if not thead:
return
rows = []
body = m.group('table_body')
for text in body.splitlines():
m = TABLE_CELL.match(text)
if not m: # pragma: no cover
return
row = _process_row(m.group(1), aligns)
if not row:
return
rows.append(row)
children = [thead, {'type': 'table_body', 'children': rows}]
state.append_token({'type': 'table', 'children': children})
return pos
def render_table(renderer, text):
return '<table>\n' + text + '</table>\n'
def render_table_body(renderer, text):
return '<tbody>\n' + text + '</tbody>\n'
def render_table_cell(renderer, text, align=None, head=False):
if head:
tag = 'th'
else:
tag = 'td'
html = ' <' + tag
if align:
html += ' style="text-align:' + align + '"'
return html + '>' + text + '</' + tag + '>\n'
def render_table_head(renderer, text):
return '<thead>\n<tr>\n' + text + '</tr>\n</thead>\n'
def render_table_row(renderer, text):
return '<tr>\n' + text + '</tr>\n'
def table(md):
"""A mistune plugin to support table, spec defined at
https://michelf.ca/projects/php-markdown/extra/#table
Here is an example:
.. code-block:: text
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
:param md: Markdown instance
"""
md.block.register('table', TABLE_PATTERN, parse_table, before='paragraph')
md.block.register('nptable', NP_TABLE_PATTERN, parse_nptable, before='paragraph')
if md.renderer and md.renderer.NAME == 'html':
md.renderer.register('table', render_table)
md.renderer.register('table_head', render_table_head)
md.renderer.register('table_body', render_table_body)
md.renderer.register('table_row', render_table_row)
md.renderer.register('table_cell', render_table_cell)
Based on the information above, please complete the function in the current file mistune/src/mistune/plugins/table.py:
def table_in_list(md):
"""Enable table plugin in list."""
|
table_in_list
|
mistune/src/mistune/plugins/table.py
| |
xmnlp.utils.parallel_handler
|
function
|
Text-Processing/xmnlp
|
Text-Processing/xmnlp/xmnlp/utils/__init__.py
|
[
90,
92
] |
[
101,
107
] |
{
"Arguments": ":param callback: Callable. The callback function to be applied to the list of texts.\n:param texts: List[str]. The list of texts to be processed.\n:param n_jobs: int. The pool size of threads. Defaults to 2.\n:param kwargs: Any additional keyword arguments to be passed to the callback function.\n:return: Generator[List[Any], None, None]. A generator that yields the results of applying the callback function to the texts in parallel.",
"Functionality": "This function is a parallel handler that takes a callback function and a list of texts as input. It then processes the texts using the callback function in parallel using a thread pool executor. If the input `texts` is not a list, raise a ValueError(\"You should pass a list of texts\")."
}
|
[
"tests/test_xmnlp.py::test_radical_parallel",
"tests/test_xmnlp.py::test_pinyin_parallel"
] | 4
|
parallel_handler@xmnlp/xmnlp/utils/__init__.py
|
{
"code": "def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[\n List[Any], None, None\n]:\n \"\"\"parallel handler\n Args:\n callback: callback function\n texts: List[str]\n n_jobs: int, pool size of threads\n Return:\n Generator[List[str]]\n \"\"\"\n if not isinstance(texts, list):\n raise ValueError(\"You should pass a list of texts\")\n if kwargs:\n callback = partial(callback, **kwargs)\n with futures.ThreadPoolExecutor(max_workers=n_jobs) as executor:\n for ret in executor.map(callback, texts):\n yield ret",
"description": "parallel handler\nArgs:\n callback: callback function\n texts: List[str]\n n_jobs: int, pool size of threads\nReturn:\n Generator[List[str]]",
"file_path": "xmnlp/xmnlp/utils/__init__.py",
"incoming_calls": [],
"name": "parallel_handler",
"signature": "def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[\n List[Any], None, None\n]:\n"
}
|
[
"from typing import Any",
"from typing import Callable",
"from typing import Generator",
"from typing import List",
"import numpy",
"import re",
"from functools import partial",
"import concurrent.futures",
"import os"
] |
def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[
List[Any], None, None
]:
"""parallel handler
Args:
callback: callback function
texts: List[str]
n_jobs: int, pool size of threads
Return:
Generator[List[str]]
"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE PyLaTeX/pylatex/lists.py
#CURRENT FILE xmnlp/xmnlp/utils/__init__.py
from typing import Any
from typing import Callable
from typing import Generator
from typing import List
import numpy
import re
from functools import partial
import concurrent.futures
import os
class List(Environment):
"""A base class that represents a list."""
#: List environments cause compile errors when they do not contain items.
#: This is why they are omitted fully if they are empty.
omit_if_empty = True
def add_item(self, s):
"""Add an item to the list.
Args
----
s: str or `~.LatexObject`
The item itself.
"""
self.append(Command("item"))
self.append(s)
re_delimiter = re.compile('[,。?!;]')
re_line_skip = re.compile('[\r\n]')
def filelist(path: str) -> Generator[str, None, None]:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if not dirs:
for f in files:
yield os.sep.join([root, f])
else:
yield path
def load_stopword(fpath: str) -> List[str]:
"""load stopwords from file """
stopwords = set()
for fname in filelist(fpath):
with open(fname, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
stopwords.add(line)
return stopwords
def rematch(offsets):
""" rematch bert token
"""
mapping = []
for offset in offsets:
if offset[0] == 0 and offset[1] == 0:
mapping.append([])
else:
mapping.append([i for i in range(offset[0], offset[1])])
return mapping
def split_text(doc: str) -> List[str]:
sents = []
for line in re_line_skip.split(doc):
line = line.strip()
if not line:
continue
for sent in re_delimiter.split(line):
sent = sent.strip()
if not sent:
continue
sents.append(sent)
return sents
def topK(matrix, K, axis=1):
""" numpy topK
"""
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
topk_index = np.argpartition(-matrix, K, axis=axis)[0:K, :]
topk_data = matrix[topk_index, row_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:K, :][topk_index_sort, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
topk_index = np.argpartition(-matrix, K, axis=axis)[:, 0:K]
topk_data = matrix[column_index, topk_index]
topk_index_sort = np.argsort(-topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:K][column_index, topk_index_sort]
return topk_data_sort, topk_index_sort
Based on the information above, please complete the function in the current file xmnlp/xmnlp/utils/__init__.py:
def parallel_handler(callback: Callable, texts: List[str], n_jobs: int = 2, **kwargs) -> Generator[
List[Any], None, None
]:
"""parallel handler
Args:
callback: callback function
texts: List[str]
n_jobs: int, pool size of threads
Return:
Generator[List[str]]
"""
|
parallel_handler
|
xmnlp/xmnlp/utils/__init__.py
| |
parsel.utils.shorten
|
function
|
Text-Processing/parsel
|
Text-Processing/parsel/parsel/utils.py
|
[
87,
87
] |
[
89,
95
] |
{
"Arguments": ":param text: String. The input text to be shortened.\n:param width: Integer. The width to which the text should be shortened.\n:param suffix: String. The suffix to be added at the end of the shortened text. Defaults to \"...\".\n:return: String. The shortened text.",
"Functionality": "Shorten the given text to fit in the given width. If the length of the text is less than or equal to the width, the original text is returned. If the width is greater than the length of the suffix, the text is truncated to fit the width and the suffix is added. If the width is greater than or equal to 0, the suffix is returned based on the width. Otherwise, a ValueError(\"width must be equal or greater than 0\") is raised."
}
|
[
"tests/test_utils.py::test_shorten"
] | 4
|
shorten@parsel/parsel/utils.py
|
{
"code": "def shorten(text: str, width: int, suffix: str = \"...\") -> str:\n \"\"\"Truncate the given text to fit in the given width.\"\"\"\n if len(text) <= width:\n return text\n if width > len(suffix):\n return text[: width - len(suffix)] + suffix\n if width >= 0:\n return suffix[len(suffix) - width :]\n raise ValueError(\"width must be equal or greater than 0\")",
"description": "Truncate the given text to fit in the given width.",
"file_path": "parsel/parsel/utils.py",
"incoming_calls": [
"Selector.__str__@parsel/parsel/selector.py",
"test_shorten@parsel/tests/test_utils.py"
],
"name": "shorten",
"signature": "def shorten(text: str, width: int, suffix: str = \"...\") -> str:\n"
}
|
[
"from typing import Any",
"from typing import Iterable",
"from typing import Iterator",
"from typing import List",
"from typing import Match",
"from typing import Pattern",
"from typing import Union",
"from typing import cast",
"import re",
"from w3lib.html import replace_entities"
] |
def shorten(text: str, width: int, suffix: str = "...") -> str:
"""Truncate the given text to fit in the given width."""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE PyLaTeX/pylatex/lists.py
#FILE natasha/natasha/extractors.py
#CURRENT FILE parsel/parsel/utils.py
from typing import Any
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Match
from typing import Pattern
from typing import Union
from typing import cast
import re
from w3lib.html import replace_entities
class List(Environment):
"""A base class that represents a list."""
#: List environments cause compile errors when they do not contain items.
#: This is why they are omitted fully if they are empty.
omit_if_empty = True
def add_item(self, s):
"""Add an item to the list.
Args
----
s: str or `~.LatexObject`
The item itself.
"""
self.append(Command("item"))
self.append(s)
class Match(Record):
__attributes__ = ['start', 'stop', 'fact']
def extract_regex(
regex: Union[str, Pattern[str]], text: str, replace_entities: bool = True
) -> List[str]:
"""Extract a list of strings from the given text/encoding using the following policies:
* if the regex contains a named group called "extract" that will be returned
* if the regex contains multiple numbered groups, all those will be returned (flattened)
* if the regex doesn't contain any group the entire regex matching is returned
"""
if isinstance(regex, str):
regex = re.compile(regex, re.UNICODE)
if "extract" in regex.groupindex:
# named group
try:
extracted = cast(Match[str], regex.search(text)).group("extract")
except AttributeError:
strings = []
else:
strings = [extracted] if extracted is not None else []
else:
# full regex or numbered groups
strings = regex.findall(text)
strings = flatten(strings)
if not replace_entities:
return strings
return [w3lib_replace_entities(s, keep=["lt", "amp"]) for s in strings]
def flatten(x: Iterable[Any]) -> List[Any]:
"""flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
>>> flatten(["foo", "bar"])
['foo', 'bar']
>>> flatten(["foo", ["baz", 42], "bar"])
['foo', 'baz', 42, 'bar']
"""
return list(iflatten(x))
def iflatten(x: Iterable[Any]) -> Iterator[Any]:
"""iflatten(sequence) -> Iterator
Similar to ``.flatten()``, but returns iterator instead"""
for el in x:
if _is_listlike(el):
yield from flatten(el)
else:
yield el
Based on the information above, please complete the function in the current file parsel/parsel/utils.py:
def shorten(text: str, width: int, suffix: str = "...") -> str:
"""Truncate the given text to fit in the given width."""
|
shorten
|
parsel/parsel/utils.py
|
def __str__(self) -> str:
data = repr(shorten(self.get(), width=40))
return f"<{type(self).__name__} query={self._expr!r} data={data}>"
|
parsel.xpathfuncs.set_xpathfunc
|
function
|
Text-Processing/parsel
|
Text-Processing/parsel/parsel/xpathfuncs.py
|
[
13,
13
] |
[
27,
31
] |
{
"Arguments": ":param fname: String. The identifier under which the function will be registered.\n:param func: Callable. The function to be registered. If None, the extension function will be removed.\n:return: No return values.",
"Functionality": "This function registers a custom extension function to use in XPath expressions. The function registered under the fname identifier will be called for every matching node, being passed a context parameter as well as any parameters passed from the corresponding XPath expression."
}
|
[
"tests/test_xpathfuncs.py::XPathFuncsTestCase::test_set_xpathfunc"
] | 4
|
set_xpathfunc@parsel/parsel/xpathfuncs.py
|
{
"code": "def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg]\n \"\"\"Register a custom extension function to use in XPath expressions.\n\n The function ``func`` registered under ``fname`` identifier will be called\n for every matching node, being passed a ``context`` parameter as well as\n any parameters passed from the corresponding XPath expression.\n\n If ``func`` is ``None``, the extension function will be removed.\n\n See more `in lxml documentation`_.\n\n .. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions\n\n \"\"\"\n ns_fns = etree.FunctionNamespace(None) # type: ignore[attr-defined]\n if func is not None:\n ns_fns[fname] = func\n else:\n del ns_fns[fname]",
"description": "Register a custom extension function to use in XPath expressions.\n\nThe function ``func`` registered under ``fname`` identifier will be called\nfor every matching node, being passed a ``context`` parameter as well as\nany parameters passed from the corresponding XPath expression.\n\nIf ``func`` is ``None``, the extension function will be removed.\n\nSee more `in lxml documentation`_.\n\n.. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions",
"file_path": "parsel/parsel/xpathfuncs.py",
"incoming_calls": [
"setup@parsel/parsel/xpathfuncs.py",
"XPathFuncsTestCase.test_set_xpathfunc@parsel/tests/test_xpathfuncs.py"
],
"name": "set_xpathfunc",
"signature": "def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg]\n \"\"\"Register a custom extension function to use in XPath expressions.\n\n The function ``func`` registered under ``fname`` identifier will be called\n for every matching node, being passed a ``context`` parameter as well as\n any parameters passed from the corresponding XPath expression.\n\n If ``func`` is ``None``, the extension function will be removed.\n\n See more `in lxml documentation`_.\n\n .. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions\n\n \"\"\"\n ns_fns = etree.FunctionNamespace(None) # type: ignore[attr-defined]\n if func is not None:\n"
}
|
[
"from typing import Any",
"from typing import Callable",
"from typing import Optional",
"import re",
"from lxml import etree",
"from w3lib.html import HTML5_WHITESPACE"
] |
def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg]
"""Register a custom extension function to use in XPath expressions.
The function ``func`` registered under ``fname`` identifier will be called
for every matching node, being passed a ``context`` parameter as well as
any parameters passed from the corresponding XPath expression.
If ``func`` is ``None``, the extension function will be removed.
See more `in lxml documentation`_.
.. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions
"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE parsel/parsel/xpathfuncs.py
from typing import Any
from typing import Callable
from typing import Optional
import re
from lxml import etree
from w3lib.html import HTML5_WHITESPACE
regex = f"[{HTML5_WHITESPACE}]+"
replace_html5_whitespaces = re.compile(regex).sub
def has_class(context: Any, *classes: str) -> bool:
"""has-class function.
Return True if all ``classes`` are present in element's class attr.
"""
if not context.eval_context.get("args_checked"):
if not classes:
raise ValueError(
"XPath error: has-class must have at least 1 argument"
)
for c in classes:
if not isinstance(c, str):
raise ValueError(
"XPath error: has-class arguments must be strings"
)
context.eval_context["args_checked"] = True
node_cls = context.context_node.get("class")
if node_cls is None:
return False
node_cls = " " + node_cls + " "
node_cls = replace_html5_whitespaces(" ", node_cls)
for cls in classes:
if " " + cls + " " not in node_cls:
return False
return True
def setup() -> None:
set_xpathfunc("has-class", has_class)
Based on the information above, please complete the function in the current file parsel/parsel/xpathfuncs.py:
def set_xpathfunc(fname: str, func: Optional[Callable]) -> None: # type: ignore[type-arg]
"""Register a custom extension function to use in XPath expressions.
The function ``func`` registered under ``fname`` identifier will be called
for every matching node, being passed a ``context`` parameter as well as
any parameters passed from the corresponding XPath expression.
If ``func`` is ``None``, the extension function will be removed.
See more `in lxml documentation`_.
.. _`in lxml documentation`: https://lxml.de/extensions.html#xpath-extension-functions
"""
|
set_xpathfunc
|
parsel/parsel/xpathfuncs.py
|
def setup() -> None:
set_xpathfunc("has-class", has_class)
|
dominate.dom_tag._get_thread_context
|
function
|
Text-Processing/dominate
|
Text-Processing/dominate/dominate/dom_tag.py
|
[
47,
47
] |
[
48,
51
] |
{
"Arguments": ":param: No input parameters.\n:return: Integer. The hash value of the current thread context.",
"Functionality": "This function returns the hash value of the current thread context. It first creates a list of the current thread and greenlet (if available) and then returns the hash value of the tuple of the context list."
}
|
[
"tests/test_dom_tag.py::test___get_thread_context"
] | 2
|
_get_thread_context@dominate/dominate/dom_tag.py
|
{
"code": "def _get_thread_context():\n context = [threading.current_thread()]\n if greenlet:\n context.append(greenlet.getcurrent())\n return hash(tuple(context))",
"description": "DOCSTRING",
"file_path": "dominate/dominate/dom_tag.py",
"incoming_calls": [
"get_current@dominate/dominate/dom_tag.py",
"dom_tag._add_to_ctx@dominate/dominate/dom_tag.py",
"dom_tag.__enter__@dominate/dominate/dom_tag.py",
"dom_tag.__exit__@dominate/dominate/dom_tag.py",
"dom_tag.add@dominate/dominate/dom_tag.py",
"test___get_thread_context@dominate/tests/test_dom_tag.py"
],
"name": "_get_thread_context",
"signature": "def _get_thread_context():\n"
}
|
[
"from . import util",
"from collections import defaultdict",
"from collections import namedtuple",
"from collections.abc import Callable",
"from functools import wraps",
"import copy",
"import greenlet",
"import numbers",
"import threading"
] |
def _get_thread_context():
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE dominate/dominate/util.py
str_escape = escape
#CURRENT FILE dominate/dominate/dom_tag.py
from . import util
from collections import defaultdict
from collections import namedtuple
from collections.abc import Callable
from functools import wraps
import copy
import greenlet
import numbers
import threading
class container(dom_tag):
'''
Contains multiple elements, but does not add a level
'''
is_inline = True
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
inline = self._render_children(sb, indent_level, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * (indent_level - 1))
return sb
class lazy(dom_tag):
'''
delays function execution until rendered
'''
def __new__(_cls, *args, **kwargs):
'''
Need to reset this special method or else
dom_tag will think it's being used as a dectorator.
This means lazy() can't be used as a dectorator, but
thinking about when you might want that just confuses me.
'''
return object.__new__(_cls)
def __init__(self, func, *args, **kwargs):
super(lazy, self).__init__()
self.func = func
self.args = args
self.kwargs = kwargs
def _render(self, sb, *a, **kw):
r = self.func(*self.args, **self.kwargs)
sb.append(str(r))
class text(dom_tag):
'''
Just a string. Useful for inside context managers
'''
is_pretty = False
is_inline = True
def __init__(self, _text, escape=True):
super(text, self).__init__()
self.escape = escape
if escape:
self.text = str_escape(_text)
else:
self.text = _text
def _render(self, sb, *a, **kw):
sb.append(self.text)
return sb
def escape(data, quote=True): # stolen from std lib cgi
'''
Escapes special characters into their html entities
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.
This is used to escape content that appears in the body of an HTML document
'''
data = data.replace("&", "&") # Must be done first!
data = data.replace("<", "<")
data = data.replace(">", ">")
if quote:
data = data.replace('"', """)
return data
def include(f):
'''
includes the contents of a file on disk.
takes a filename
'''
fl = open(f, 'r')
data = fl.read()
fl.close()
return raw(data)
def raw(s):
'''
Inserts a raw string into the DOM. Unsafe. Alias for text(x, escape=False)
'''
return text(s, escape=False)
def system(cmd, data=None):
'''
pipes the output of a program
'''
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode('utf8')
def unescape(data):
'''
unescapes html entities. the opposite of escape.
'''
cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));')
result = []
m = cc.search(data)
while m:
result.append(data[0:m.start()])
d = m.group(1)
if d:
d = int(d)
result.append(unichr(d))
else:
d = _unescape.get(m.group(2), ord('?'))
result.append(unichr(d))
data = data[m.end():]
m = cc.search(data)
result.append(data)
return ''.join(result)
class dom_tag(object):
is_single = False # Tag does not require matching end tag (ex. <hr/>)
is_pretty = True # Text inside the tag should be left as-is (ex. <pre>)
# otherwise, text will be escaped() and whitespace may be
# modified
is_inline = False
def __new__(_cls, *args, **kwargs):
'''
Check if bare tag is being used a a decorator
(called with a single function arg).
decorate the function and return
'''
if len(args) == 1 and isinstance(args[0], Callable) \
and not isinstance(args[0], dom_tag) and not kwargs:
wrapped = args[0]
@wraps(wrapped)
def f(*args, **kwargs):
with _cls() as _tag:
return wrapped(*args, **kwargs) or _tag
return f
return object.__new__(_cls)
def __init__(self, *args, **kwargs):
'''
Creates a new tag. Child tags should be passed as arguments and attributes
should be passed as keyword arguments.
There is a non-rendering attribute which controls how the tag renders:
* `__inline` - Boolean value. If True renders all children tags on the same
line.
'''
self.attributes = {}
self.children = []
self.parent = None
# Does not insert newlines on all children if True (recursive attribute)
self.is_inline = kwargs.pop('__inline', self.is_inline)
self.is_pretty = kwargs.pop('__pretty', self.is_pretty)
#Add child elements
if args:
self.add(*args)
for attr, value in kwargs.items():
self.set_attribute(*type(self).clean_pair(attr, value))
self._ctx = None
self._add_to_ctx()
# context manager
frame = namedtuple('frame', ['tag', 'items', 'used'])
# stack of frames
_with_contexts = defaultdict(list)
def _add_to_ctx(self):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
self._ctx = stack[-1]
stack[-1].items.append(self)
def __enter__(self):
stack = dom_tag._with_contexts[_get_thread_context()]
stack.append(dom_tag.frame(self, [], set()))
return self
def __exit__(self, type, value, traceback):
thread_id = _get_thread_context()
stack = dom_tag._with_contexts[thread_id]
frame = stack.pop()
for item in frame.items:
if item in frame.used: continue
self.add(item)
if not stack:
del dom_tag._with_contexts[thread_id]
def __call__(self, func):
'''
tag instance is being used as a decorator.
wrap func to make a copy of this tag
'''
# remove decorator from its context so it doesn't
# get added in where it was defined
if self._ctx:
self._ctx.used.add(self)
@wraps(func)
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
return f
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
__setitem__ = set_attribute
def delete_attribute(self, key):
if isinstance(key, int):
del self.children[key:key+1]
else:
del self.attributes[key]
__delitem__ = delete_attribute
def add(self, *args):
'''
Add new child tags.
'''
for obj in args:
if isinstance(obj, numbers.Number):
# Convert to string so we fall into next if block
obj = str(obj)
if isinstance(obj, basestring):
obj = util.escape(obj)
self.children.append(obj)
elif isinstance(obj, dom_tag):
stack = dom_tag._with_contexts.get(_get_thread_context(), [])
for s in stack:
s.used.add(obj)
self.children.append(obj)
obj.parent = self
elif isinstance(obj, dict):
for attr, value in obj.items():
self.set_attribute(*dom_tag.clean_pair(attr, value))
elif hasattr(obj, '__iter__'):
for subobj in obj:
self.add(subobj)
else: # wtf is it?
raise ValueError('%r not a tag or string.' % obj)
if len(args) == 1:
return args[0]
return args
def add_raw_string(self, s):
self.children.append(s)
def remove(self, obj):
self.children.remove(obj)
def clear(self):
for i in self.children:
if isinstance(i, dom_tag) and i.parent is self:
i.parent = None
self.children = []
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
# Stupid workaround since we can not use dom_tag in the method declaration
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results
def __getitem__(self, key):
'''
Returns the stored value of the specified attribute or child
(if it exists).
'''
if isinstance(key, int):
# Children are accessed using integers
try:
return object.__getattribute__(self, 'children')[key]
except IndexError:
raise IndexError('Child with index "%s" does not exist.' % key)
elif isinstance(key, basestring):
# Attributes are accessed using strings
try:
return object.__getattribute__(self, 'attributes')[key]
except KeyError:
raise AttributeError('Attribute "%s" does not exist.' % key)
else:
raise TypeError('Only integer and string types are valid for accessing '
'child tags and attributes, respectively.')
__getattr__ = __getitem__
def __len__(self):
'''
Number of child elements.
'''
return len(self.children)
def __bool__(self):
'''
Hack for "if x" and __len__
'''
return True
__nonzero__ = __bool__
def __iter__(self):
'''
Iterates over child elements.
'''
return self.children.__iter__()
def __contains__(self, item):
'''
Checks recursively if item is in children tree.
Accepts both a string and a class.
'''
return bool(self.get(item))
def __iadd__(self, obj):
'''
Reflexive binary addition simply adds tag as a child.
'''
self.add(obj)
return self
# String and unicode representations are the same as render()
def __unicode__(self):
return self.render()
__str__ = __unicode__
def render(self, indent=' ', pretty=True, xhtml=False):
data = self._render([], 0, indent, pretty, xhtml)
return u''.join(data)
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
pretty = pretty and self.is_pretty
name = getattr(self, 'tagname', type(self).__name__)
# Workaround for python keywords and standard classes/methods
# (del, object, input)
if name[-1] == '_':
name = name[:-1]
# open tag
sb.append('<')
sb.append(name)
for attribute, value in sorted(self.attributes.items()):
if value in (False, None):
continue
val = unicode(value) if isinstance(value, util.text) and not value.escape else util.escape(unicode(value), True)
sb.append(' %s="%s"' % (attribute, val))
sb.append(' />' if self.is_single and xhtml else '>')
if self.is_single:
return sb
inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * indent_level)
# close tag
sb.append('</')
sb.append(name)
sb.append('>')
return sb
def _render_children(self, sb, indent_level, indent_str, pretty, xhtml):
inline = True
for child in self.children:
if isinstance(child, dom_tag):
if pretty and not child.is_inline:
inline = False
sb.append('\n')
sb.append(indent_str * indent_level)
child._render(sb, indent_level, indent_str, pretty, xhtml)
else:
sb.append(unicode(child))
return inline
def __repr__(self):
name = '%s.%s' % (self.__module__, type(self).__name__)
attributes_len = len(self.attributes)
attributes = '%s attribute' % attributes_len
if attributes_len != 1: attributes += 's'
children_len = len(self.children)
children = '%s child' % children_len
if children_len != 1: children += 'ren'
return '<%s at %x: %s, %s>' % (name, id(self), attributes, children)
@staticmethod
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'klass': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
'phor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')])
if attribute in set(['http_equiv']) or special_prefix:
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
@classmethod
def clean_pair(cls, attribute, value):
'''
This will call `clean_attribute` on the attribute and also allows for the
creation of boolean attributes.
Ex. input(selected=True) is equivalent to input(selected="selected")
'''
attribute = cls.clean_attribute(attribute)
# Check for boolean attributes
# (i.e. selected=True becomes selected="selected")
if value is True:
value = attribute
# Ignore `if value is False`: this is filtered out in render()
return (attribute, value)
_get_current_none = object()
def attr(*args, **kwargs):
'''
Set attributes on the current active tag context
'''
c = get_current()
dicts = args + (kwargs,)
for d in dicts:
for attr, value in d.items():
c.set_attribute(*dom_tag.clean_pair(attr, value))
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
def get_current(default=_get_current_none):
'''
get the current tag being used as a with context or decorated function.
if no context is active, raises ValueError, or returns the default, if provided
'''
h = _get_thread_context()
ctx = dom_tag._with_contexts.get(h, None)
if ctx:
return ctx[-1].tag
if default is _get_current_none:
raise ValueError('no current context')
return default
Based on the information above, please complete the function in the current file dominate/dominate/dom_tag.py:
def _get_thread_context():
|
_get_thread_context
|
dominate/dominate/dom_tag.py
|
def get_current(default=_get_current_none):
'''
get the current tag being used as a with context or decorated function.
if no context is active, raises ValueError, or returns the default, if provided
'''
h = _get_thread_context()
ctx = dom_tag._with_contexts.get(h, None)
if ctx:
return ctx[-1].tag
if default is _get_current_none:
raise ValueError('no current context')
return default
|
dominate.util.system
|
function
|
Text-Processing/dominate
|
Text-Processing/dominate/dominate/util.py
|
[
45,
45
] |
[
49,
52
] |
{
"Arguments": ":param cmd: String. The system command to be executed.\n:param data: Bytes. Optional input data to be passed to the command.\n:return: String. The output of the system command as a decoded string.",
"Functionality": "This function runs a system command and returns the output as a string. It uses the subprocess module to run the command and capture the output."
}
|
[
"tests/test_utils.py::test_system"
] | 2
|
system@dominate/dominate/util.py
|
{
"code": "def system(cmd, data=None):\n '''\n pipes the output of a program\n '''\n import subprocess\n s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n out, err = s.communicate(data)\n return out.decode('utf8')",
"description": "pipes the output of a program",
"file_path": "dominate/dominate/util.py",
"incoming_calls": [
"test_system@dominate/tests/test_utils.py"
],
"name": "system",
"signature": "def system(cmd, data=None):\n"
}
|
[
"from .dom_tag import dom_tag",
"import re"
] |
def system(cmd, data=None):
'''
pipes the output of a program
'''
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE dominate/dominate/dom_tag.py
#CURRENT FILE dominate/dominate/util.py
from .dom_tag import dom_tag
import re
class container(dom_tag):
'''
Contains multiple elements, but does not add a level
'''
is_inline = True
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
inline = self._render_children(sb, indent_level, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * (indent_level - 1))
return sb
class lazy(dom_tag):
'''
delays function execution until rendered
'''
def __new__(_cls, *args, **kwargs):
'''
Need to reset this special method or else
dom_tag will think it's being used as a dectorator.
This means lazy() can't be used as a dectorator, but
thinking about when you might want that just confuses me.
'''
return object.__new__(_cls)
def __init__(self, func, *args, **kwargs):
super(lazy, self).__init__()
self.func = func
self.args = args
self.kwargs = kwargs
def _render(self, sb, *a, **kw):
r = self.func(*self.args, **self.kwargs)
sb.append(str(r))
class text(dom_tag):
'''
Just a string. Useful for inside context managers
'''
is_pretty = False
is_inline = True
def __init__(self, _text, escape=True):
super(text, self).__init__()
self.escape = escape
if escape:
self.text = str_escape(_text)
else:
self.text = _text
def _render(self, sb, *a, **kw):
sb.append(self.text)
return sb
_replace_map = dict((c, '%%%2X' % ord(c)) for c in _reserved)
str_escape = escape
def escape(data, quote=True): # stolen from std lib cgi
'''
Escapes special characters into their html entities
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.
This is used to escape content that appears in the body of an HTML document
'''
data = data.replace("&", "&") # Must be done first!
data = data.replace("<", "<")
data = data.replace(">", ">")
if quote:
data = data.replace('"', """)
return data
def include(f):
'''
includes the contents of a file on disk.
takes a filename
'''
fl = open(f, 'r')
data = fl.read()
fl.close()
return raw(data)
def raw(s):
'''
Inserts a raw string into the DOM. Unsafe. Alias for text(x, escape=False)
'''
return text(s, escape=False)
def unescape(data):
'''
unescapes html entities. the opposite of escape.
'''
cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));')
result = []
m = cc.search(data)
while m:
result.append(data[0:m.start()])
d = m.group(1)
if d:
d = int(d)
result.append(unichr(d))
else:
d = _unescape.get(m.group(2), ord('?'))
result.append(unichr(d))
data = data[m.end():]
m = cc.search(data)
result.append(data)
return ''.join(result)
def url_escape(data):
return ''.join(_replace_map.get(c, c) for c in data)
def url_unescape(data):
return re.sub('%([0-9a-fA-F]{2})',
lambda m: unichr(int(m.group(1), 16)), data)
class dom_tag(object):
is_single = False # Tag does not require matching end tag (ex. <hr/>)
is_pretty = True # Text inside the tag should be left as-is (ex. <pre>)
# otherwise, text will be escaped() and whitespace may be
# modified
is_inline = False
def __new__(_cls, *args, **kwargs):
'''
Check if bare tag is being used a a decorator
(called with a single function arg).
decorate the function and return
'''
if len(args) == 1 and isinstance(args[0], Callable) \
and not isinstance(args[0], dom_tag) and not kwargs:
wrapped = args[0]
@wraps(wrapped)
def f(*args, **kwargs):
with _cls() as _tag:
return wrapped(*args, **kwargs) or _tag
return f
return object.__new__(_cls)
def __init__(self, *args, **kwargs):
'''
Creates a new tag. Child tags should be passed as arguments and attributes
should be passed as keyword arguments.
There is a non-rendering attribute which controls how the tag renders:
* `__inline` - Boolean value. If True renders all children tags on the same
line.
'''
self.attributes = {}
self.children = []
self.parent = None
# Does not insert newlines on all children if True (recursive attribute)
self.is_inline = kwargs.pop('__inline', self.is_inline)
self.is_pretty = kwargs.pop('__pretty', self.is_pretty)
#Add child elements
if args:
self.add(*args)
for attr, value in kwargs.items():
self.set_attribute(*type(self).clean_pair(attr, value))
self._ctx = None
self._add_to_ctx()
# context manager
frame = namedtuple('frame', ['tag', 'items', 'used'])
# stack of frames
_with_contexts = defaultdict(list)
def _add_to_ctx(self):
stack = dom_tag._with_contexts.get(_get_thread_context())
if stack:
self._ctx = stack[-1]
stack[-1].items.append(self)
def __enter__(self):
stack = dom_tag._with_contexts[_get_thread_context()]
stack.append(dom_tag.frame(self, [], set()))
return self
def __exit__(self, type, value, traceback):
thread_id = _get_thread_context()
stack = dom_tag._with_contexts[thread_id]
frame = stack.pop()
for item in frame.items:
if item in frame.used: continue
self.add(item)
if not stack:
del dom_tag._with_contexts[thread_id]
def __call__(self, func):
'''
tag instance is being used as a decorator.
wrap func to make a copy of this tag
'''
# remove decorator from its context so it doesn't
# get added in where it was defined
if self._ctx:
self._ctx.used.add(self)
@wraps(func)
def f(*args, **kwargs):
tag = copy.deepcopy(self)
tag._add_to_ctx()
with tag:
return func(*args, **kwargs) or tag
return f
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
__setitem__ = set_attribute
def delete_attribute(self, key):
if isinstance(key, int):
del self.children[key:key+1]
else:
del self.attributes[key]
__delitem__ = delete_attribute
def add(self, *args):
'''
Add new child tags.
'''
for obj in args:
if isinstance(obj, numbers.Number):
# Convert to string so we fall into next if block
obj = str(obj)
if isinstance(obj, basestring):
obj = util.escape(obj)
self.children.append(obj)
elif isinstance(obj, dom_tag):
stack = dom_tag._with_contexts.get(_get_thread_context(), [])
for s in stack:
s.used.add(obj)
self.children.append(obj)
obj.parent = self
elif isinstance(obj, dict):
for attr, value in obj.items():
self.set_attribute(*dom_tag.clean_pair(attr, value))
elif hasattr(obj, '__iter__'):
for subobj in obj:
self.add(subobj)
else: # wtf is it?
raise ValueError('%r not a tag or string.' % obj)
if len(args) == 1:
return args[0]
return args
def add_raw_string(self, s):
self.children.append(s)
def remove(self, obj):
self.children.remove(obj)
def clear(self):
for i in self.children:
if isinstance(i, dom_tag) and i.parent is self:
i.parent = None
self.children = []
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
# Stupid workaround since we can not use dom_tag in the method declaration
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results
def __getitem__(self, key):
'''
Returns the stored value of the specified attribute or child
(if it exists).
'''
if isinstance(key, int):
# Children are accessed using integers
try:
return object.__getattribute__(self, 'children')[key]
except IndexError:
raise IndexError('Child with index "%s" does not exist.' % key)
elif isinstance(key, basestring):
# Attributes are accessed using strings
try:
return object.__getattribute__(self, 'attributes')[key]
except KeyError:
raise AttributeError('Attribute "%s" does not exist.' % key)
else:
raise TypeError('Only integer and string types are valid for accessing '
'child tags and attributes, respectively.')
__getattr__ = __getitem__
def __len__(self):
'''
Number of child elements.
'''
return len(self.children)
def __bool__(self):
'''
Hack for "if x" and __len__
'''
return True
__nonzero__ = __bool__
def __iter__(self):
'''
Iterates over child elements.
'''
return self.children.__iter__()
def __contains__(self, item):
'''
Checks recursively if item is in children tree.
Accepts both a string and a class.
'''
return bool(self.get(item))
def __iadd__(self, obj):
'''
Reflexive binary addition simply adds tag as a child.
'''
self.add(obj)
return self
# String and unicode representations are the same as render()
def __unicode__(self):
return self.render()
__str__ = __unicode__
def render(self, indent=' ', pretty=True, xhtml=False):
data = self._render([], 0, indent, pretty, xhtml)
return u''.join(data)
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
pretty = pretty and self.is_pretty
name = getattr(self, 'tagname', type(self).__name__)
# Workaround for python keywords and standard classes/methods
# (del, object, input)
if name[-1] == '_':
name = name[:-1]
# open tag
sb.append('<')
sb.append(name)
for attribute, value in sorted(self.attributes.items()):
if value in (False, None):
continue
val = unicode(value) if isinstance(value, util.text) and not value.escape else util.escape(unicode(value), True)
sb.append(' %s="%s"' % (attribute, val))
sb.append(' />' if self.is_single and xhtml else '>')
if self.is_single:
return sb
inline = self._render_children(sb, indent_level + 1, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * indent_level)
# close tag
sb.append('</')
sb.append(name)
sb.append('>')
return sb
def _render_children(self, sb, indent_level, indent_str, pretty, xhtml):
inline = True
for child in self.children:
if isinstance(child, dom_tag):
if pretty and not child.is_inline:
inline = False
sb.append('\n')
sb.append(indent_str * indent_level)
child._render(sb, indent_level, indent_str, pretty, xhtml)
else:
sb.append(unicode(child))
return inline
def __repr__(self):
name = '%s.%s' % (self.__module__, type(self).__name__)
attributes_len = len(self.attributes)
attributes = '%s attribute' % attributes_len
if attributes_len != 1: attributes += 's'
children_len = len(self.children)
children = '%s child' % children_len
if children_len != 1: children += 'ren'
return '<%s at %x: %s, %s>' % (name, id(self), attributes, children)
@staticmethod
def clean_attribute(attribute):
'''
Normalize attribute names for shorthand and work arounds for limitations
in Python's syntax
'''
# Shorthand
attribute = {
'cls': 'class',
'className': 'class',
'class_name': 'class',
'klass': 'class',
'fr': 'for',
'html_for': 'for',
'htmlFor': 'for',
'phor': 'for',
}.get(attribute, attribute)
# Workaround for Python's reserved words
if attribute[0] == '_':
attribute = attribute[1:]
# Workaround for dash
special_prefix = any([attribute.startswith(x) for x in ('data_', 'aria_')])
if attribute in set(['http_equiv']) or special_prefix:
attribute = attribute.replace('_', '-').lower()
# Workaround for colon
if attribute.split('_')[0] in ('xlink', 'xml', 'xmlns'):
attribute = attribute.replace('_', ':', 1).lower()
return attribute
@classmethod
def clean_pair(cls, attribute, value):
'''
This will call `clean_attribute` on the attribute and also allows for the
creation of boolean attributes.
Ex. input(selected=True) is equivalent to input(selected="selected")
'''
attribute = cls.clean_attribute(attribute)
# Check for boolean attributes
# (i.e. selected=True becomes selected="selected")
if value is True:
value = attribute
# Ignore `if value is False`: this is filtered out in render()
return (attribute, value)
Based on the information above, please complete the function in the current file dominate/dominate/util.py:
def system(cmd, data=None):
'''
pipes the output of a program
'''
|
system
|
dominate/dominate/util.py
|
def test_system():
d = div()
d += util.system('echo Hello World')
assert d.render().replace('\r\n', '\n') == '<div>Hello World\n</div>'
|
dominate.util.url_unescape
|
function
|
Text-Processing/dominate
|
Text-Processing/dominate/dominate/util.py
|
[
118,
118
] |
[
119,
120
] |
{
"Arguments": ":param data: String. The URL-encoded string to be unescaped.\n:return: String. The unescaped string.",
"Functionality": "This function takes a string as input and unescapes any URL-encoded characters in the string."
}
|
[
"tests/test_utils.py::test_url"
] | 2
|
url_unescape@dominate/dominate/util.py
|
{
"code": "def url_unescape(data):\n return re.sub('%([0-9a-fA-F]{2})',\n lambda m: unichr(int(m.group(1), 16)), data)",
"description": "DOCSTRING",
"file_path": "dominate/dominate/util.py",
"incoming_calls": [
"test_url@dominate/tests/test_utils.py"
],
"name": "url_unescape",
"signature": "def url_unescape(data):\n"
}
|
[
"from .dom_tag import dom_tag",
"import re"
] |
def url_unescape(data):
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE dominate/dominate/util.py
from .dom_tag import dom_tag
import re
class container(dom_tag):
'''
Contains multiple elements, but does not add a level
'''
is_inline = True
def _render(self, sb, indent_level, indent_str, pretty, xhtml):
inline = self._render_children(sb, indent_level, indent_str, pretty, xhtml)
if pretty and not inline:
sb.append('\n')
sb.append(indent_str * (indent_level - 1))
return sb
class lazy(dom_tag):
'''
delays function execution until rendered
'''
def __new__(_cls, *args, **kwargs):
'''
Need to reset this special method or else
dom_tag will think it's being used as a dectorator.
This means lazy() can't be used as a dectorator, but
thinking about when you might want that just confuses me.
'''
return object.__new__(_cls)
def __init__(self, func, *args, **kwargs):
super(lazy, self).__init__()
self.func = func
self.args = args
self.kwargs = kwargs
def _render(self, sb, *a, **kw):
r = self.func(*self.args, **self.kwargs)
sb.append(str(r))
class text(dom_tag):
'''
Just a string. Useful for inside context managers
'''
is_pretty = False
is_inline = True
def __init__(self, _text, escape=True):
super(text, self).__init__()
self.escape = escape
if escape:
self.text = str_escape(_text)
else:
self.text = _text
def _render(self, sb, *a, **kw):
sb.append(self.text)
return sb
_replace_map = dict((c, '%%%2X' % ord(c)) for c in _reserved)
_unescape = {
'quot': 34,
'amp': 38,
'lt': 60,
'gt': 62,
'nbsp': 32,
# more here
# http://www.w3.org/TR/html4/sgml/entities.html
'yuml': 255,
}
str_escape = escape
def escape(data, quote=True): # stolen from std lib cgi
'''
Escapes special characters into their html entities
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.
This is used to escape content that appears in the body of an HTML document
'''
data = data.replace("&", "&") # Must be done first!
data = data.replace("<", "<")
data = data.replace(">", ">")
if quote:
data = data.replace('"', """)
return data
def raw(s):
'''
Inserts a raw string into the DOM. Unsafe. Alias for text(x, escape=False)
'''
return text(s, escape=False)
def system(cmd, data=None):
'''
pipes the output of a program
'''
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode('utf8')
def unescape(data):
'''
unescapes html entities. the opposite of escape.
'''
cc = re.compile(r'&(?:(?:#(\d+))|([^;]+));')
result = []
m = cc.search(data)
while m:
result.append(data[0:m.start()])
d = m.group(1)
if d:
d = int(d)
result.append(unichr(d))
else:
d = _unescape.get(m.group(2), ord('?'))
result.append(unichr(d))
data = data[m.end():]
m = cc.search(data)
result.append(data)
return ''.join(result)
def url_escape(data):
return ''.join(_replace_map.get(c, c) for c in data)
Based on the information above, please complete the function in the current file dominate/dominate/util.py:
def url_unescape(data):
|
url_unescape
|
dominate/dominate/util.py
|
def test_url():
assert util.url_escape('hi there?') == 'hi%20there%3F'
assert util.url_unescape('hi%20there%3f') == 'hi there?'
|
rows.fields.DatetimeField.serialize
|
method
|
Text-Processing/rows
|
Text-Processing/rows/rows/fields.py
|
[
390,
390
] |
[
391,
394
] |
{
"Arguments": ":param cls: Class. The class instance.\n:param value: Datetime. The datetime value to be serialized.\n:param *args: Additional positional arguments.\n:param **kwargs: Additional keyword arguments.\n:return: String. The serialized datetime value in ISO 8601 format.",
"Functionality": "Serialize the given datetime value into a string in ISO 8601 format."
}
|
[
"tests/tests_fields.py::FieldsTestCase::test_DatetimeField"
] | 8
|
DatetimeField.serialize@rows/rows/fields.py
|
{
"code": "def serialize(cls, value, *args, **kwargs):\n if value is None:\n return \"\"\n\n return six.text_type(value.isoformat())",
"description": "DOCSTRING",
"file_path": "rows/rows/fields.py",
"incoming_calls": [],
"name": "serialize",
"signature": "def serialize(cls, value, *args, **kwargs):\n"
}
|
[
"from base64 import b64decode",
"from base64 import b64encode",
"import datetime",
"import json",
"import re",
"from __future__ import unicode_literals",
"from collections import OrderedDict",
"from collections import defaultdict",
"from decimal import Decimal",
"from decimal import InvalidOperation",
"from unicodedata import normalize",
"import binascii",
"import locale",
"import six"
] |
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE natasha/natasha/norm.py
#CURRENT FILE rows/rows/fields.py
from base64 import b64decode
from base64 import b64encode
import datetime
import json
import re
from __future__ import unicode_literals
from collections import OrderedDict
from collections import defaultdict
from decimal import Decimal
from decimal import InvalidOperation
from unicodedata import normalize
import binascii
import locale
import six
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = (six.binary_type,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
TYPE = (bool,)
SERIALIZED_VALUES = {True: "true", False: "false", None: ""}
TRUE_VALUES = ("true", "yes")
FALSE_VALUES = ("false", "no")
@classmethod
def serialize(cls, value, *args, **kwargs):
# TODO: should we serialize `None` as well or give it to the plugin?
return cls.SERIALIZED_VALUES[value]
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(BoolField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value).lower()
if value in cls.TRUE_VALUES:
return True
elif value in cls.FALSE_VALUES:
return False
else:
raise ValueError("Value is not boolean")
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.date,)
INPUT_FORMAT = "%Y-%m-%d"
OUTPUT_FORMAT = "%Y-%m-%d"
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
class DecimalField(Field):
"""Field class to represent decimal data (as Python's decimal.Decimal)
Is locale-aware
"""
TYPE = (Decimal,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
value_as_string = six.text_type(value)
if SHOULD_NOT_USE_LOCALE:
return value_as_string
else:
grouping = kwargs.get("grouping", None)
has_decimal_places = value_as_string.find(".") != -1
if not has_decimal_places:
string_format = "%d"
else:
decimal_places = len(value_as_string.split(".")[1])
string_format = "%.{}f".format(decimal_places)
return locale.format(string_format, value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DecimalField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif type(value) in (int, float):
return Decimal(six.text_type(value))
if SHOULD_NOT_USE_LOCALE:
try:
return Decimal(value)
except InvalidOperation:
value_error(value, cls)
else:
locale_vars = locale.localeconv()
decimal_separator = locale_vars["decimal_point"]
interesting_vars = (
"decimal_point",
"mon_decimal_point",
"mon_thousands_sep",
"negative_sign",
"positive_sign",
"thousands_sep",
)
chars = (
locale_vars[x].replace(".", r"\.").replace("-", r"\-")
for x in interesting_vars
)
interesting_chars = "".join(set(chars))
regexp = re.compile(r"[^0-9{} ]".format(interesting_chars))
value = as_string(value)
if regexp.findall(value):
value_error(value, cls)
parts = [
REGEXP_ONLY_NUMBERS.subn("", number)[0]
for number in value.split(decimal_separator)
]
if len(parts) > 2:
raise ValueError("Can't deserialize with this locale.")
try:
value = Decimal(parts[0])
if len(parts) == 2:
decimal_places = len(parts[1])
value = value + (Decimal(parts[1]) / (10 ** decimal_places))
except InvalidOperation:
value_error(value, cls)
return value
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
EMAIL_REGEXP = re.compile(
r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(EmailField, cls).deserialize(value)
if value is None or not value.strip():
return None
result = cls.EMAIL_REGEXP.findall(value)
if not result:
value_error(value, cls)
else:
return result[0]
class Field(object):
"""Base Field class - all fields should inherit from this
As the fallback for all other field types are the BinaryField, this Field
actually implements what is expected in the BinaryField
"""
TYPE = (type(None),)
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
`cls.serialize` should always return an unicode value, except for
BinaryField
"""
if value is None:
value = ""
return value
@classmethod
def deserialize(cls, value, *args, **kwargs):
"""Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`.
"""
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
else:
return value
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
TYPE = (float,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%f", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
TYPE = (int,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%d", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif isinstance(value, float):
new_value = int(value)
if new_value != value:
raise ValueError("It's float, not integer")
else:
value = new_value
value = as_string(value)
if value != "0" and value.startswith("0"):
raise ValueError("It's string, not integer")
return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
TYPE = (list, dict)
@classmethod
def serialize(cls, value, *args, **kwargs):
return json.dumps(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(JSONField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
else:
return json.loads(value)
class PercentField(DecimalField):
"""Field class to represent percent values
Is locale-aware (inherit this behaviour from `rows.DecimalField`)
"""
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
elif value == Decimal("0"):
return "0.00%"
value = Decimal(six.text_type(value * 100)[:-2])
value = super(PercentField, cls).serialize(value, *args, **kwargs)
return "{}%".format(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
value = as_string(value)
if "%" not in value:
value_error(value, cls)
value = value.replace("%", "")
return super(PercentField, cls).deserialize(value) / 100
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = (six.text_type,)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
class TypeDetector(object):
"""Detect data types based on a list of Field classes"""
def __init__(
self,
field_names=None,
field_types=DEFAULT_TYPES,
fallback_type=TextField,
skip_indexes=None,
):
self.field_names = field_names or []
self.field_types = list(field_types)
self.fallback_type = fallback_type
self._possible_types = defaultdict(lambda: list(self.field_types))
self._samples = []
self._skip = skip_indexes or tuple()
def check_type(self, index, value):
for type_ in self._possible_types[index][:]:
try:
type_.deserialize(value)
except (ValueError, TypeError):
self._possible_types[index].remove(type_)
def process_row(self, row):
for index, value in enumerate(row):
if index in self._skip:
continue
self.check_type(index, value)
def feed(self, data):
for row in data:
self.process_row(row)
def priority(self, *field_types):
"""Decide the priority between each possible type"""
return field_types[0] if field_types else self.fallback_type
@property
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
NULL = ("-", "null", "none", "nil", "n/a", "na")
NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na")
SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
def detect_types(
field_names,
field_values,
field_types=DEFAULT_TYPES,
skip_indexes=None,
type_detector=TypeDetector,
fallback_type=TextField,
*args,
**kwargs
):
"""Detect column types (or "where the magic happens")"""
# TODO: look strategy of csv.Sniffer.has_header
# TODO: may receive 'type hints'
detector = type_detector(
field_names,
field_types=field_types,
fallback_type=fallback_type,
skip_indexes=skip_indexes,
)
detector.feed(field_values)
return detector.fields
def get_items(*indexes):
"""Return a callable that fetches the given indexes of an object
Always return a tuple even when len(indexes) == 1.
Similar to `operator.itemgetter`, but will insert `None` when the object
does not have the desired index (instead of raising IndexError).
"""
return lambda obj: tuple(
obj[index] if len(obj) > index else None for index in indexes
)
def identify_type(value):
"""Identify the field type for a specific value"""
return detect_types(["name"], [[value]])["name"]
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
def make_header(field_names, permit_not=False):
"""Return unique and slugged field names."""
slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"
header = [
slug(field_name, permitted_chars=slug_chars) for field_name in field_names
]
result = []
for index, field_name in enumerate(header):
if not field_name:
field_name = "field_{}".format(index)
elif field_name[0].isdigit():
field_name = "field_{}".format(field_name)
if field_name in result:
field_name = make_unique_name(
name=field_name, existing_names=result, start=2
)
result.append(field_name)
return result
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
"""Return a unique name based on `name_format` and `name`."""
index = start
new_name = name
while new_name in existing_names:
new_name = name_format.format(name=name, index=index)
index += 1
return new_name
def slug(text, separator="_", permitted_chars=SLUG_CHARS, replace_with_separator=" -_"):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace spaces and other chars with separator
# Example: u' ALVARO justen% ' -> u'_ALVARO__justen%_'
for char in replace_with_separator:
text = text.replace(char, separator)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
text = "".join(char for char in text if char in permitted_chars).lower()
# Remove double occurrencies of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
double_separator = separator + separator
while double_separator in text:
text = text.replace(double_separator, separator)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator)
def unique_values(values):
result = []
for value in values:
if not is_null(value) and value not in result:
result.append(value)
return result
def value_error(value, cls):
value = repr(value)
if len(value) > 50:
value = value[:50] + "..."
raise ValueError("Value '{}' can't be {}".format(value, cls.__name__))
def normalize(vocab, tokens):
words = inflect_words(vocab, tokens)
words = recover_shapes(words, tokens)
return recover_spaces(words, tokens)
Based on the information above, please complete the function in the current file rows/rows/fields.py:
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
serialize
|
rows/rows/fields.py
| |
rows.fields.Field.serialize
|
method
|
Text-Processing/rows
|
Text-Processing/rows/rows/fields.py
|
[
77,
77
] |
[
84,
86
] |
{
"Arguments": ":param cls: Class. The class instance.\n:param value: Any. The value to be serialized.\n:param *args: Tuple. Additional positional arguments.\n:param **kwargs: Dictionary. Additional keyword arguments.\n:return: Any. The serialized value.",
"Functionality": "This function serializes a value to be exported. It should always return a unicode value, except for BinaryField."
}
|
[
"tests/tests_fields.py::FieldsTestCase::test_Field",
"tests/tests_fields.py::FieldsTestCase::test_TextField"
] | 8
|
Field.serialize@rows/rows/fields.py
|
{
"code": "def serialize(cls, value, *args, **kwargs):\n \"\"\"Serialize a value to be exported\n\n `cls.serialize` should always return an unicode value, except for\n BinaryField\n \"\"\"\n\n if value is None:\n value = \"\"\n return value",
"description": "Serialize a value to be exported\n\n`cls.serialize` should always return an unicode value, except for\nBinaryField",
"file_path": "rows/rows/fields.py",
"incoming_calls": [],
"name": "serialize",
"signature": "def serialize(cls, value, *args, **kwargs):\n"
}
|
[
"from base64 import b64decode",
"from base64 import b64encode",
"import datetime",
"import json",
"import re",
"from __future__ import unicode_literals",
"from collections import OrderedDict",
"from collections import defaultdict",
"from decimal import Decimal",
"from decimal import InvalidOperation",
"from unicodedata import normalize",
"import binascii",
"import locale",
"import six"
] |
class Field(object):
"""Base Field class - all fields should inherit from this
As the fallback for all other field types are the BinaryField, this Field
actually implements what is expected in the BinaryField
"""
TYPE = (type(None),)
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
`cls.serialize` should always return an unicode value, except for
BinaryField
"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE rows/rows/fields.py
from base64 import b64decode
from base64 import b64encode
import datetime
import json
import re
from __future__ import unicode_literals
from collections import OrderedDict
from collections import defaultdict
from decimal import Decimal
from decimal import InvalidOperation
from unicodedata import normalize
import binascii
import locale
import six
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = (six.binary_type,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
TYPE = (bool,)
SERIALIZED_VALUES = {True: "true", False: "false", None: ""}
TRUE_VALUES = ("true", "yes")
FALSE_VALUES = ("false", "no")
@classmethod
def serialize(cls, value, *args, **kwargs):
# TODO: should we serialize `None` as well or give it to the plugin?
return cls.SERIALIZED_VALUES[value]
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(BoolField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value).lower()
if value in cls.TRUE_VALUES:
return True
elif value in cls.FALSE_VALUES:
return False
else:
raise ValueError("Value is not boolean")
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.date,)
INPUT_FORMAT = "%Y-%m-%d"
OUTPUT_FORMAT = "%Y-%m-%d"
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.isoformat())
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
value_error(value, cls)
else:
return datetime.datetime(*[int(x) for x in groups[0]])
class DecimalField(Field):
"""Field class to represent decimal data (as Python's decimal.Decimal)
Is locale-aware
"""
TYPE = (Decimal,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
value_as_string = six.text_type(value)
if SHOULD_NOT_USE_LOCALE:
return value_as_string
else:
grouping = kwargs.get("grouping", None)
has_decimal_places = value_as_string.find(".") != -1
if not has_decimal_places:
string_format = "%d"
else:
decimal_places = len(value_as_string.split(".")[1])
string_format = "%.{}f".format(decimal_places)
return locale.format(string_format, value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DecimalField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif type(value) in (int, float):
return Decimal(six.text_type(value))
if SHOULD_NOT_USE_LOCALE:
try:
return Decimal(value)
except InvalidOperation:
value_error(value, cls)
else:
locale_vars = locale.localeconv()
decimal_separator = locale_vars["decimal_point"]
interesting_vars = (
"decimal_point",
"mon_decimal_point",
"mon_thousands_sep",
"negative_sign",
"positive_sign",
"thousands_sep",
)
chars = (
locale_vars[x].replace(".", r"\.").replace("-", r"\-")
for x in interesting_vars
)
interesting_chars = "".join(set(chars))
regexp = re.compile(r"[^0-9{} ]".format(interesting_chars))
value = as_string(value)
if regexp.findall(value):
value_error(value, cls)
parts = [
REGEXP_ONLY_NUMBERS.subn("", number)[0]
for number in value.split(decimal_separator)
]
if len(parts) > 2:
raise ValueError("Can't deserialize with this locale.")
try:
value = Decimal(parts[0])
if len(parts) == 2:
decimal_places = len(parts[1])
value = value + (Decimal(parts[1]) / (10 ** decimal_places))
except InvalidOperation:
value_error(value, cls)
return value
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
EMAIL_REGEXP = re.compile(
r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(EmailField, cls).deserialize(value)
if value is None or not value.strip():
return None
result = cls.EMAIL_REGEXP.findall(value)
if not result:
value_error(value, cls)
else:
return result[0]
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
TYPE = (float,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%f", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
TYPE = (int,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%d", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif isinstance(value, float):
new_value = int(value)
if new_value != value:
raise ValueError("It's float, not integer")
else:
value = new_value
value = as_string(value)
if value != "0" and value.startswith("0"):
raise ValueError("It's string, not integer")
return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
TYPE = (list, dict)
@classmethod
def serialize(cls, value, *args, **kwargs):
return json.dumps(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(JSONField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
else:
return json.loads(value)
class PercentField(DecimalField):
"""Field class to represent percent values
Is locale-aware (inherit this behaviour from `rows.DecimalField`)
"""
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
elif value == Decimal("0"):
return "0.00%"
value = Decimal(six.text_type(value * 100)[:-2])
value = super(PercentField, cls).serialize(value, *args, **kwargs)
return "{}%".format(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
value = as_string(value)
if "%" not in value:
value_error(value, cls)
value = value.replace("%", "")
return super(PercentField, cls).deserialize(value) / 100
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = (six.text_type,)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
NULL = ("-", "null", "none", "nil", "n/a", "na")
NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na")
SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
def identify_type(value):
"""Identify the field type for a specific value"""
return detect_types(["name"], [[value]])["name"]
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
def slug(text, separator="_", permitted_chars=SLUG_CHARS, replace_with_separator=" -_"):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace spaces and other chars with separator
# Example: u' ALVARO justen% ' -> u'_ALVARO__justen%_'
for char in replace_with_separator:
text = text.replace(char, separator)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
text = "".join(char for char in text if char in permitted_chars).lower()
# Remove double occurrencies of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
double_separator = separator + separator
while double_separator in text:
text = text.replace(double_separator, separator)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator)
def value_error(value, cls):
value = repr(value)
if len(value) > 50:
value = value[:50] + "..."
raise ValueError("Value '{}' can't be {}".format(value, cls.__name__))
Based on the information above, please complete the function in the current file rows/rows/fields.py:
class Field(object):
"""Base Field class - all fields should inherit from this
As the fallback for all other field types are the BinaryField, this Field
actually implements what is expected in the BinaryField
"""
TYPE = (type(None),)
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
`cls.serialize` should always return an unicode value, except for
BinaryField
"""
|
serialize
|
rows/rows/fields.py
| |
rows.fields.EmailField.serialize
|
method
|
Text-Processing/rows
|
Text-Processing/rows/rows/fields.py
|
[
438,
438
] |
[
439,
442
] |
{
"Arguments": ":param cls: Class. The class itself.\n:param value: Any. The value to be serialized.\n:param *args: Tuple. Additional positional arguments.\n:param **kwargs: Dictionary. Additional keyword arguments.\n:return: String. The serialized value.",
"Functionality": "Serialize the value of the email field. If the value is None, it returns an empty string. Otherwise, it returns the string representation of the value."
}
|
[
"tests/tests_fields.py::FieldsTestCase::test_EmailField"
] | 8
|
EmailField.serialize@rows/rows/fields.py
|
{
"code": "def serialize(cls, value, *args, **kwargs):\n if value is None:\n return \"\"\n\n return six.text_type(value)",
"description": "DOCSTRING",
"file_path": "rows/rows/fields.py",
"incoming_calls": [],
"name": "serialize",
"signature": "def serialize(cls, value, *args, **kwargs):\n"
}
|
[
"from base64 import b64decode",
"from base64 import b64encode",
"import datetime",
"import json",
"import re",
"from __future__ import unicode_literals",
"from collections import OrderedDict",
"from collections import defaultdict",
"from decimal import Decimal",
"from decimal import InvalidOperation",
"from unicodedata import normalize",
"import binascii",
"import locale",
"import six"
] |
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
EMAIL_REGEXP = re.compile(
r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE natasha/natasha/norm.py
#CURRENT FILE rows/rows/fields.py
from base64 import b64decode
from base64 import b64encode
import datetime
import json
import re
from __future__ import unicode_literals
from collections import OrderedDict
from collections import defaultdict
from decimal import Decimal
from decimal import InvalidOperation
from unicodedata import normalize
import binascii
import locale
import six
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = (six.binary_type,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
class BoolField(Field):
"""Base class to representing boolean
Is not locale-aware (if you need to, please customize by changing its
attributes like `TRUE_VALUES` and `FALSE_VALUES`)
"""
TYPE = (bool,)
SERIALIZED_VALUES = {True: "true", False: "false", None: ""}
TRUE_VALUES = ("true", "yes")
FALSE_VALUES = ("false", "no")
@classmethod
def serialize(cls, value, *args, **kwargs):
# TODO: should we serialize `None` as well or give it to the plugin?
return cls.SERIALIZED_VALUES[value]
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(BoolField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value).lower()
if value in cls.TRUE_VALUES:
return True
elif value in cls.FALSE_VALUES:
return False
else:
raise ValueError("Value is not boolean")
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.date,)
INPUT_FORMAT = "%Y-%m-%d"
OUTPUT_FORMAT = "%Y-%m-%d"
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.isoformat())
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
value_error(value, cls)
else:
return datetime.datetime(*[int(x) for x in groups[0]])
class DecimalField(Field):
"""Field class to represent decimal data (as Python's decimal.Decimal)
Is locale-aware
"""
TYPE = (Decimal,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
value_as_string = six.text_type(value)
if SHOULD_NOT_USE_LOCALE:
return value_as_string
else:
grouping = kwargs.get("grouping", None)
has_decimal_places = value_as_string.find(".") != -1
if not has_decimal_places:
string_format = "%d"
else:
decimal_places = len(value_as_string.split(".")[1])
string_format = "%.{}f".format(decimal_places)
return locale.format(string_format, value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DecimalField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif type(value) in (int, float):
return Decimal(six.text_type(value))
if SHOULD_NOT_USE_LOCALE:
try:
return Decimal(value)
except InvalidOperation:
value_error(value, cls)
else:
locale_vars = locale.localeconv()
decimal_separator = locale_vars["decimal_point"]
interesting_vars = (
"decimal_point",
"mon_decimal_point",
"mon_thousands_sep",
"negative_sign",
"positive_sign",
"thousands_sep",
)
chars = (
locale_vars[x].replace(".", r"\.").replace("-", r"\-")
for x in interesting_vars
)
interesting_chars = "".join(set(chars))
regexp = re.compile(r"[^0-9{} ]".format(interesting_chars))
value = as_string(value)
if regexp.findall(value):
value_error(value, cls)
parts = [
REGEXP_ONLY_NUMBERS.subn("", number)[0]
for number in value.split(decimal_separator)
]
if len(parts) > 2:
raise ValueError("Can't deserialize with this locale.")
try:
value = Decimal(parts[0])
if len(parts) == 2:
decimal_places = len(parts[1])
value = value + (Decimal(parts[1]) / (10 ** decimal_places))
except InvalidOperation:
value_error(value, cls)
return value
class Field(object):
"""Base Field class - all fields should inherit from this
As the fallback for all other field types are the BinaryField, this Field
actually implements what is expected in the BinaryField
"""
TYPE = (type(None),)
@classmethod
def serialize(cls, value, *args, **kwargs):
"""Serialize a value to be exported
`cls.serialize` should always return an unicode value, except for
BinaryField
"""
if value is None:
value = ""
return value
@classmethod
def deserialize(cls, value, *args, **kwargs):
"""Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`.
"""
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
else:
return value
class FloatField(Field):
"""Field class to represent float
Is locale-aware
"""
TYPE = (float,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%f", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(FloatField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
if SHOULD_NOT_USE_LOCALE:
return float(value)
else:
return locale.atof(value)
class IntegerField(Field):
"""Field class to represent integer
Is locale-aware
"""
TYPE = (int,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
if SHOULD_NOT_USE_LOCALE:
return six.text_type(value)
else:
grouping = kwargs.get("grouping", None)
return locale.format("%d", value, grouping=grouping)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(IntegerField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
elif isinstance(value, float):
new_value = int(value)
if new_value != value:
raise ValueError("It's float, not integer")
else:
value = new_value
value = as_string(value)
if value != "0" and value.startswith("0"):
raise ValueError("It's string, not integer")
return int(value) if SHOULD_NOT_USE_LOCALE else locale.atoi(value)
class JSONField(Field):
"""Field class to represent JSON-encoded strings
Is not locale-aware (does not need to be)
"""
TYPE = (list, dict)
@classmethod
def serialize(cls, value, *args, **kwargs):
return json.dumps(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(JSONField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
else:
return json.loads(value)
class PercentField(DecimalField):
"""Field class to represent percent values
Is locale-aware (inherit this behaviour from `rows.DecimalField`)
"""
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
elif value == Decimal("0"):
return "0.00%"
value = Decimal(six.text_type(value * 100)[:-2])
value = super(PercentField, cls).serialize(value, *args, **kwargs)
return "{}%".format(value)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
value = as_string(value)
if "%" not in value:
value_error(value, cls)
value = value.replace("%", "")
return super(PercentField, cls).deserialize(value) / 100
class TextField(Field):
"""Field class to represent unicode strings
Is not locale-aware (does not need to be)
"""
TYPE = (six.text_type,)
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is None or isinstance(value, cls.TYPE):
return value
else:
return as_string(value)
class TypeDetector(object):
"""Detect data types based on a list of Field classes"""
def __init__(
self,
field_names=None,
field_types=DEFAULT_TYPES,
fallback_type=TextField,
skip_indexes=None,
):
self.field_names = field_names or []
self.field_types = list(field_types)
self.fallback_type = fallback_type
self._possible_types = defaultdict(lambda: list(self.field_types))
self._samples = []
self._skip = skip_indexes or tuple()
def check_type(self, index, value):
for type_ in self._possible_types[index][:]:
try:
type_.deserialize(value)
except (ValueError, TypeError):
self._possible_types[index].remove(type_)
def process_row(self, row):
for index, value in enumerate(row):
if index in self._skip:
continue
self.check_type(index, value)
def feed(self, data):
for row in data:
self.process_row(row)
def priority(self, *field_types):
"""Decide the priority between each possible type"""
return field_types[0] if field_types else self.fallback_type
@property
def fields(self):
possible, skip = self._possible_types, self._skip
if possible:
# Create a header with placeholder values for each detected column
# and then join this placeholders with original header - the
# original header may have less columns then the detected ones, so
# we end with a full header having a name for every possible
# column.
placeholders = make_header(range(max(possible.keys()) + 1))
header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
else:
header = self.field_names
return OrderedDict(
[
(
field_name,
self.priority(*(possible[index] if index in possible else [])),
)
for index, field_name in enumerate(header)
if index not in skip
]
)
NULL = ("-", "null", "none", "nil", "n/a", "na")
NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na")
SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
def detect_types(
field_names,
field_values,
field_types=DEFAULT_TYPES,
skip_indexes=None,
type_detector=TypeDetector,
fallback_type=TextField,
*args,
**kwargs
):
"""Detect column types (or "where the magic happens")"""
# TODO: look strategy of csv.Sniffer.has_header
# TODO: may receive 'type hints'
detector = type_detector(
field_names,
field_types=field_types,
fallback_type=fallback_type,
skip_indexes=skip_indexes,
)
detector.feed(field_values)
return detector.fields
def get_items(*indexes):
"""Return a callable that fetches the given indexes of an object
Always return a tuple even when len(indexes) == 1.
Similar to `operator.itemgetter`, but will insert `None` when the object
does not have the desired index (instead of raising IndexError).
"""
return lambda obj: tuple(
obj[index] if len(obj) > index else None for index in indexes
)
def identify_type(value):
"""Identify the field type for a specific value"""
return detect_types(["name"], [[value]])["name"]
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
def make_header(field_names, permit_not=False):
"""Return unique and slugged field names."""
slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"
header = [
slug(field_name, permitted_chars=slug_chars) for field_name in field_names
]
result = []
for index, field_name in enumerate(header):
if not field_name:
field_name = "field_{}".format(index)
elif field_name[0].isdigit():
field_name = "field_{}".format(field_name)
if field_name in result:
field_name = make_unique_name(
name=field_name, existing_names=result, start=2
)
result.append(field_name)
return result
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
"""Return a unique name based on `name_format` and `name`."""
index = start
new_name = name
while new_name in existing_names:
new_name = name_format.format(name=name, index=index)
index += 1
return new_name
def slug(text, separator="_", permitted_chars=SLUG_CHARS, replace_with_separator=" -_"):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace spaces and other chars with separator
# Example: u' ALVARO justen% ' -> u'_ALVARO__justen%_'
for char in replace_with_separator:
text = text.replace(char, separator)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
text = "".join(char for char in text if char in permitted_chars).lower()
# Remove double occurrencies of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
double_separator = separator + separator
while double_separator in text:
text = text.replace(double_separator, separator)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator)
def unique_values(values):
result = []
for value in values:
if not is_null(value) and value not in result:
result.append(value)
return result
def value_error(value, cls):
value = repr(value)
if len(value) > 50:
value = value[:50] + "..."
raise ValueError("Value '{}' can't be {}".format(value, cls.__name__))
def normalize(vocab, tokens):
words = inflect_words(vocab, tokens)
words = recover_shapes(words, tokens)
return recover_spaces(words, tokens)
Based on the information above, please complete the function in the current file rows/rows/fields.py:
class EmailField(TextField):
"""Field class to represent e-mail addresses
Is not locale-aware (does not need to be)
"""
EMAIL_REGEXP = re.compile(
r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
)
@classmethod
def serialize(cls, value, *args, **kwargs):
|
serialize
|
rows/rows/fields.py
| |
rows.fields.as_string
|
function
|
Text-Processing/rows
|
Text-Processing/rows/rows/fields.py
|
[
478,
478
] |
[
479,
484
] |
{
"Arguments": ":param value: Any. The input value to be converted to a string.\n:return: String. The input value converted to a string.",
"Functionality": "Convert the input value to a string. If the input value is already a string, it returns the input value. If the input value is a binary type, it raises a ValueError."
}
|
[
"tests/tests_fields.py::FieldsFunctionsTestCase::test_as_string"
] | 4
|
as_string@rows/rows/fields.py
|
{
"code": "def as_string(value):\n if isinstance(value, six.binary_type):\n raise ValueError(\"Binary is not supported\")\n elif isinstance(value, six.text_type):\n return value\n else:\n return six.text_type(value)",
"description": "DOCSTRING",
"file_path": "rows/rows/fields.py",
"incoming_calls": [
"is_null@rows/rows/fields.py",
"BoolField.deserialize@rows/rows/fields.py",
"IntegerField.deserialize@rows/rows/fields.py",
"FloatField.deserialize@rows/rows/fields.py",
"DecimalField.deserialize@rows/rows/fields.py",
"PercentField.deserialize@rows/rows/fields.py",
"DateField.deserialize@rows/rows/fields.py",
"DatetimeField.deserialize@rows/rows/fields.py",
"TextField.deserialize@rows/rows/fields.py",
"FieldsFunctionsTestCase.test_as_string@rows/tests/tests_fields.py"
],
"name": "as_string",
"signature": "def as_string(value):\n"
}
|
[
"from base64 import b64decode",
"from base64 import b64encode",
"import datetime",
"import json",
"import re",
"from __future__ import unicode_literals",
"from collections import OrderedDict",
"from collections import defaultdict",
"from decimal import Decimal",
"from decimal import InvalidOperation",
"from unicodedata import normalize",
"import binascii",
"import locale",
"import six"
] |
def as_string(value):
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#FILE natasha/natasha/norm.py
#CURRENT FILE rows/rows/fields.py
from base64 import b64decode
from base64 import b64encode
import datetime
import json
import re
from __future__ import unicode_literals
from collections import OrderedDict
from collections import defaultdict
from decimal import Decimal
from decimal import InvalidOperation
from unicodedata import normalize
import binascii
import locale
import six
class BinaryField(Field):
"""Field class to represent byte arrays
Is not locale-aware (does not need to be)
"""
TYPE = (six.binary_type,)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is not None:
if not isinstance(value, six.binary_type):
value_error(value, cls)
else:
try:
return b64encode(value).decode("ascii")
except (TypeError, binascii.Error):
return value
else:
return ""
@classmethod
def deserialize(cls, value, *args, **kwargs):
if value is not None:
if isinstance(value, six.binary_type):
return value
elif isinstance(value, six.text_type):
try:
return b64decode(value)
except (TypeError, ValueError, binascii.Error):
raise ValueError("Can't decode base64")
else:
value_error(value, cls)
else:
return b""
class DateField(Field):
"""Field class to represent date
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.date,)
INPUT_FORMAT = "%Y-%m-%d"
OUTPUT_FORMAT = "%Y-%m-%d"
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.strftime(cls.OUTPUT_FORMAT))
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DateField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
dt_object = datetime.datetime.strptime(value, cls.INPUT_FORMAT)
return datetime.date(dt_object.year, dt_object.month, dt_object.day)
class DatetimeField(Field):
"""Field class to represent date-time
Is not locale-aware (does not need to be)
"""
TYPE = (datetime.datetime,)
DATETIME_REGEXP = re.compile(
"^([0-9]{4})-([0-9]{2})-([0-9]{2})[ T]" "([0-9]{2}):([0-9]{2}):([0-9]{2})$"
)
@classmethod
def serialize(cls, value, *args, **kwargs):
if value is None:
return ""
return six.text_type(value.isoformat())
@classmethod
def deserialize(cls, value, *args, **kwargs):
value = super(DatetimeField, cls).deserialize(value)
if value is None or isinstance(value, cls.TYPE):
return value
value = as_string(value)
# TODO: may use iso8601
groups = cls.DATETIME_REGEXP.findall(value)
if not groups:
value_error(value, cls)
else:
return datetime.datetime(*[int(x) for x in groups[0]])
class DecimalField(Field):
    """Field class to represent decimal data (as Python's decimal.Decimal)
    Is locale-aware
    """

    TYPE = (Decimal,)

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        # Render a Decimal as text ("" for None).  When locale support is on,
        # format with exactly as many decimal places as the input value shows.
        if value is None:
            return ""
        value_as_string = six.text_type(value)
        if SHOULD_NOT_USE_LOCALE:
            return value_as_string
        else:
            grouping = kwargs.get("grouping", None)
            has_decimal_places = value_as_string.find(".") != -1
            if not has_decimal_places:
                string_format = "%d"
            else:
                decimal_places = len(value_as_string.split(".")[1])
                string_format = "%.{}f".format(decimal_places)
            return locale.format(string_format, value, grouping=grouping)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        # Parse text (or int/float) into a Decimal.  In locale mode, grouping
        # characters are stripped and the fractional part is rebuilt manually.
        value = super(DecimalField, cls).deserialize(value)
        if value is None or isinstance(value, cls.TYPE):
            return value
        elif type(value) in (int, float):
            # Go through str() so float binary-representation noise is avoided.
            return Decimal(six.text_type(value))
        if SHOULD_NOT_USE_LOCALE:
            try:
                return Decimal(value)
            except InvalidOperation:
                value_error(value, cls)
        else:
            locale_vars = locale.localeconv()
            decimal_separator = locale_vars["decimal_point"]
            interesting_vars = (
                "decimal_point",
                "mon_decimal_point",
                "mon_thousands_sep",
                "negative_sign",
                "positive_sign",
                "thousands_sep",
            )
            # Escape regex-significant characters coming from the locale data.
            chars = (
                locale_vars[x].replace(".", r"\.").replace("-", r"\-")
                for x in interesting_vars
            )
            interesting_chars = "".join(set(chars))
            # Any character outside digits/locale separators/space is invalid.
            regexp = re.compile(r"[^0-9{} ]".format(interesting_chars))
            value = as_string(value)
            if regexp.findall(value):
                value_error(value, cls)
            parts = [
                REGEXP_ONLY_NUMBERS.subn("", number)[0]
                for number in value.split(decimal_separator)
            ]
            # More than one decimal separator cannot be parsed unambiguously.
            if len(parts) > 2:
                raise ValueError("Can't deserialize with this locale.")
            try:
                value = Decimal(parts[0])
                if len(parts) == 2:
                    decimal_places = len(parts[1])
                    # Re-attach the fractional part scaled by 10**places.
                    value = value + (Decimal(parts[1]) / (10 ** decimal_places))
            except InvalidOperation:
                value_error(value, cls)
            return value
class EmailField(TextField):
    """Field class to represent e-mail addresses
    Is not locale-aware (does not need to be)
    """

    EMAIL_REGEXP = re.compile(
        r"^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]+$", flags=re.IGNORECASE
    )

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        """Render the address as text; None becomes ""."""
        return "" if value is None else six.text_type(value)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Validate and return the e-mail address, or None for blank input."""
        value = super(EmailField, cls).deserialize(value)
        if value is None or not value.strip():
            return None
        matches = cls.EMAIL_REGEXP.findall(value)
        if matches:
            return matches[0]
        value_error(value, cls)
class Field(object):
    """Base Field class - all fields should inherit from this

    As the fallback for all other field types are the BinaryField, this Field
    actually implements what is expected in the BinaryField
    """

    TYPE = (type(None),)

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        """Serialize a value to be exported.

        `cls.serialize` should always return an unicode value, except for
        BinaryField.
        """
        return "" if value is None else value

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Deserialize a value just after importing it.

        `cls.deserialize` should always return a value of type `cls.TYPE`
        or `None`.
        """
        if isinstance(value, cls.TYPE):
            return value
        return None if is_null(value) else value
class FloatField(Field):
    """Field class to represent float
    Is locale-aware
    """

    TYPE = (float,)

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        """Format a float as text, honouring the active locale when enabled."""
        if value is None:
            return ""
        if SHOULD_NOT_USE_LOCALE:
            return six.text_type(value)
        grouping = kwargs.get("grouping", None)
        return locale.format("%f", value, grouping=grouping)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Parse text into a float, using locale-aware parsing when enabled."""
        value = super(FloatField, cls).deserialize(value)
        if value is None or isinstance(value, cls.TYPE):
            return value
        text = as_string(value)
        return float(text) if SHOULD_NOT_USE_LOCALE else locale.atof(text)
class IntegerField(Field):
    """Field class to represent integer
    Is locale-aware
    """

    TYPE = (int,)

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        """Format an integer as text, honouring the active locale when enabled."""
        if value is None:
            return ""
        if SHOULD_NOT_USE_LOCALE:
            return six.text_type(value)
        grouping = kwargs.get("grouping", None)
        return locale.format("%d", value, grouping=grouping)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Parse text into an int; rejects lossy floats and zero-padded strings."""
        value = super(IntegerField, cls).deserialize(value)
        if value is None or isinstance(value, cls.TYPE):
            return value
        if isinstance(value, float):
            truncated = int(value)
            if truncated != value:
                raise ValueError("It's float, not integer")
            value = truncated
        text = as_string(value)
        # A leading zero (e.g. "007") marks the data as textual, not numeric.
        if text != "0" and text.startswith("0"):
            raise ValueError("It's string, not integer")
        return int(text) if SHOULD_NOT_USE_LOCALE else locale.atoi(text)
class JSONField(Field):
    """Field class to represent JSON-encoded strings
    Is not locale-aware (does not need to be)
    """

    TYPE = (list, dict)

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        """Dump the value to a JSON string."""
        return json.dumps(value)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Load a JSON string into a list/dict; containers pass through."""
        value = super(JSONField, cls).deserialize(value)
        if value is None or isinstance(value, cls.TYPE):
            return value
        return json.loads(value)
class PercentField(DecimalField):
    """Field class to represent percent values
    Is locale-aware (inherit this behaviour from `rows.DecimalField`)
    """

    @classmethod
    def serialize(cls, value, *args, **kwargs):
        # Render a Decimal fraction (e.g. Decimal("0.42")) as "42%"-style text.
        if value is None:
            return ""
        elif value == Decimal("0"):
            return "0.00%"
        # NOTE(review): the [:-2] slice trims the last two characters of
        # str(value * 100) — presumably to drop trailing digits introduced by
        # the Decimal multiplication and keep the original precision; confirm
        # against the serializer tests before changing.
        value = Decimal(six.text_type(value * 100)[:-2])
        value = super(PercentField, cls).serialize(value, *args, **kwargs)
        return "{}%".format(value)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        # Parse "42%"-style text into a Decimal fraction (0.42).
        if isinstance(value, cls.TYPE):
            return value
        elif is_null(value):
            return None
        value = as_string(value)
        # A percent value must carry the "%" marker; anything else is invalid.
        if "%" not in value:
            value_error(value, cls)
        value = value.replace("%", "")
        return super(PercentField, cls).deserialize(value) / 100
class TextField(Field):
    """Field class to represent unicode strings
    Is not locale-aware (does not need to be)
    """

    TYPE = (six.text_type,)

    @classmethod
    def deserialize(cls, value, *args, **kwargs):
        """Coerce any non-None value to a unicode string."""
        if value is None or isinstance(value, cls.TYPE):
            return value
        return as_string(value)
class TypeDetector(object):
    """Detect data types based on a list of Field classes"""

    def __init__(
        self,
        field_names=None,
        field_types=DEFAULT_TYPES,
        fallback_type=TextField,
        skip_indexes=None,
    ):
        # Each column starts with the full candidate-type list; candidates
        # are removed as sample values fail to deserialize.
        self.field_names = field_names or []
        self.field_types = list(field_types)
        self.fallback_type = fallback_type
        self._possible_types = defaultdict(lambda: list(self.field_types))
        self._samples = []
        self._skip = skip_indexes or tuple()

    def check_type(self, index, value):
        # Drop candidate types that cannot deserialize this sample value.
        # Iterate over a copy ([:]) since the list is mutated in the loop.
        for type_ in self._possible_types[index][:]:
            try:
                type_.deserialize(value)
            except (ValueError, TypeError):
                self._possible_types[index].remove(type_)

    def process_row(self, row):
        # Feed one row of raw values, skipping ignored column indexes.
        for index, value in enumerate(row):
            if index in self._skip:
                continue
            self.check_type(index, value)

    def feed(self, data):
        # Consume an iterable of rows, narrowing candidates per column.
        for row in data:
            self.process_row(row)

    def priority(self, *field_types):
        """Decide the priority between each possible type"""
        # First surviving candidate wins; fall back when none remain.
        return field_types[0] if field_types else self.fallback_type

    @property
    def fields(self):
        # Map each (non-skipped) column name to its detected Field class.
        possible, skip = self._possible_types, self._skip
        if possible:
            # Create a header with placeholder values for each detected column
            # and then join this placeholders with original header - the
            # original header may have less columns then the detected ones, so
            # we end with a full header having a name for every possible
            # column.
            placeholders = make_header(range(max(possible.keys()) + 1))
            header = [a or b for a, b in zip_longest(self.field_names, placeholders)]
        else:
            header = self.field_names
        return OrderedDict(
            [
                (
                    field_name,
                    self.priority(*(possible[index] if index in possible else [])),
                )
                for index, field_name in enumerate(header)
                if index not in skip
            ]
        )
# Strings treated as "null"/missing when a text value is stripped+lowercased.
NULL = ("-", "null", "none", "nil", "n/a", "na")
# Same sentinel values for byte-string inputs.
NULL_BYTES = (b"-", b"null", b"none", b"nil", b"n/a", b"na")
# Characters permitted in generated slugs / field names.
SLUG_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
def is_null(value):
    """Return True when *value* represents a missing/empty datum."""
    if value is None:
        return True
    if type(value) is six.binary_type:
        stripped = value.strip().lower()
        return not stripped or stripped in NULL_BYTES
    normalized = as_string(value).strip().lower()
    return not normalized or normalized in NULL
def make_header(field_names, permit_not=False):
    """Return unique and slugged field names."""
    allowed_chars = SLUG_CHARS + "^" if permit_not else SLUG_CHARS
    result = []
    for index, raw_name in enumerate(field_names):
        candidate = slug(raw_name, permitted_chars=allowed_chars)
        if not candidate:
            # Empty after slugging: synthesize a positional name.
            candidate = "field_{}".format(index)
        elif candidate[0].isdigit():
            # Identifiers must not start with a digit.
            candidate = "field_{}".format(candidate)
        if candidate in result:
            candidate = make_unique_name(
                name=candidate, existing_names=result, start=2
            )
        result.append(candidate)
    return result
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
    """Return a unique name based on `name_format` and `name`."""
    candidate = name
    counter = start
    # Keep bumping the suffix until the candidate is not already taken.
    while candidate in existing_names:
        candidate = name_format.format(name=name, index=counter)
        counter += 1
    return candidate
def slug(text, separator="_", permitted_chars=SLUG_CHARS, replace_with_separator=" -_"):
    """Generate a slug for the `text`.

    >>> slug(' ÁLVARO justen% ')
    'alvaro_justen'
    >>> slug(' ÁLVARO justen% ', separator='-')
    'alvaro-justen'
    """
    text = six.text_type(text or "")
    # Drop accents / non-ASCII characters via NFKD decomposition.
    text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
    # Map spaces, dashes and underscores (by default) onto the separator.
    for char in replace_with_separator:
        text = text.replace(char, separator)
    # Keep only permitted characters and lowercase the result.
    text = "".join(char for char in text if char in permitted_chars).lower()
    # Collapse runs of the separator into a single occurrence.
    doubled = separator * 2
    while doubled in text:
        text = text.replace(doubled, separator)
    # Trim leading/trailing separators.
    return text.strip(separator)
def value_error(value, cls):
    """Raise a ValueError explaining that *value* cannot become *cls*."""
    shown = repr(value)
    # Truncate long reprs so the exception message stays readable.
    if len(shown) > 50:
        shown = shown[:50] + "..."
    raise ValueError("Value '{}' can't be {}".format(shown, cls.__name__))
def normalize(vocab, tokens):
    # NOTE(review): delegates to inflect_words/recover_shapes/recover_spaces,
    # none of which are defined in this file — presumably imported elsewhere;
    # verify before relying on this function.
    # NOTE(review): this definition shadows `from unicodedata import normalize`
    # used by slug() above — confirm that shadowing is intentional.
    words = inflect_words(vocab, tokens)
    words = recover_shapes(words, tokens)
    return recover_spaces(words, tokens)
Based on the information above, please complete the function in the current file rows/rows/fields.py:
def as_string(value):
|
as_string
|
rows/rows/fields.py
|
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
|
rows.fields.get_items
|
function
|
Text-Processing/rows
|
Text-Processing/rows/rows/fields.py
|
[
506,
506
] |
[
513,
515
] |
{
"Arguments": ":param indexes: Tuple. The indexes of the object to be fetched.\n:return: Lambda function. A callable that fetches the given indexes of an object.",
"Functionality": "This function returns a callable that fetches the given indexes of an object. It always returns a tuple even when len(indexes) == 1. It is similar to `operator.itemgetter`, but will insert `None` when the object does not have the desired index (instead of raising IndexError)."
}
|
[
"tests/tests_fields.py::FieldsFunctionsTestCase::test_get_items"
] | 4
|
get_items@rows/rows/fields.py
|
{
"code": "def get_items(*indexes):\n \"\"\"Return a callable that fetches the given indexes of an object\n Always return a tuple even when len(indexes) == 1.\n\n Similar to `operator.itemgetter`, but will insert `None` when the object\n does not have the desired index (instead of raising IndexError).\n \"\"\"\n return lambda obj: tuple(\n obj[index] if len(obj) > index else None for index in indexes\n )",
"description": "Return a callable that fetches the given indexes of an object\nAlways return a tuple even when len(indexes) == 1.\n\nSimilar to `operator.itemgetter`, but will insert `None` when the object\ndoes not have the desired index (instead of raising IndexError).",
"file_path": "rows/rows/fields.py",
"incoming_calls": [
"get_item@python-benedict/benedict/dicts/keylist/keylist_util.py",
"create_table@rows/rows/plugins/utils.py",
"FieldsFunctionsTestCase.test_get_items@rows/tests/tests_fields.py"
],
"name": "get_items",
"signature": "def get_items(*indexes):\n"
}
|
[
"from base64 import b64decode",
"from base64 import b64encode",
"import datetime",
"import json",
"import re",
"from __future__ import unicode_literals",
"from collections import OrderedDict",
"from collections import defaultdict",
"from decimal import Decimal",
"from decimal import InvalidOperation",
"from unicodedata import normalize",
"import binascii",
"import locale",
"import six"
] |
def get_items(*indexes):
"""Return a callable that fetches the given indexes of an object
Always return a tuple even when len(indexes) == 1.
Similar to `operator.itemgetter`, but will insert `None` when the object
does not have the desired index (instead of raising IndexError).
"""
|
You are a Python programmer working with a repository. Here is all the context you may find useful to complete the function:
#CURRENT FILE rows/rows/fields.py
from base64 import b64decode
from base64 import b64encode
import datetime
import json
import re
from __future__ import unicode_literals
from collections import OrderedDict
from collections import defaultdict
from decimal import Decimal
from decimal import InvalidOperation
from unicodedata import normalize
import binascii
import locale
import six
NULL = ("-", "null", "none", "nil", "n/a", "na")
def as_string(value):
if isinstance(value, six.binary_type):
raise ValueError("Binary is not supported")
elif isinstance(value, six.text_type):
return value
else:
return six.text_type(value)
def is_null(value):
if value is None:
return True
elif type(value) is six.binary_type:
value = value.strip().lower()
return not value or value in NULL_BYTES
else:
value_str = as_string(value).strip().lower()
return not value_str or value_str in NULL
def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
"""Return a unique name based on `name_format` and `name`."""
index = start
new_name = name
while new_name in existing_names:
new_name = name_format.format(name=name, index=index)
index += 1
return new_name
def slug(text, separator="_", permitted_chars=SLUG_CHARS, replace_with_separator=" -_"):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace spaces and other chars with separator
# Example: u' ALVARO justen% ' -> u'_ALVARO__justen%_'
for char in replace_with_separator:
text = text.replace(char, separator)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
text = "".join(char for char in text if char in permitted_chars).lower()
# Remove double occurrencies of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
double_separator = separator + separator
while double_separator in text:
text = text.replace(double_separator, separator)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator)
def unique_values(values):
result = []
for value in values:
if not is_null(value) and value not in result:
result.append(value)
return result
def value_error(value, cls):
value = repr(value)
if len(value) > 50:
value = value[:50] + "..."
raise ValueError("Value '{}' can't be {}".format(value, cls.__name__))
Based on the information above, please complete the function in the current file rows/rows/fields.py:
def get_items(*indexes):
"""Return a callable that fetches the given indexes of an object
Always return a tuple even when len(indexes) == 1.
Similar to `operator.itemgetter`, but will insert `None` when the object
does not have the desired index (instead of raising IndexError).
"""
|
get_items
|
rows/rows/fields.py
|
def get_item(d, keys):
items = get_items(d, keys)
return items[-1] if items else (None, None, None)
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 6