pruned venvs
This commit is contained in:
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
@@ -1,84 +0,0 @@
|
||||
"""
|
||||
Internal module for console introspection
|
||||
"""
|
||||
|
||||
import sys
|
||||
import locale
|
||||
from pandas.io.formats.terminal import get_terminal_size
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Global formatting options
|
||||
_initial_defencoding = None
|
||||
|
||||
|
||||
def detect_console_encoding():
|
||||
"""
|
||||
Try to find the most capable encoding supported by the console.
|
||||
slightly modified from the way IPython handles the same issue.
|
||||
"""
|
||||
global _initial_defencoding
|
||||
|
||||
encoding = None
|
||||
try:
|
||||
encoding = sys.stdout.encoding or sys.stdin.encoding
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
# try again for something better
|
||||
if not encoding or 'ascii' in encoding.lower():
|
||||
try:
|
||||
encoding = locale.getpreferredencoding()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# when all else fails. this will usually be "ascii"
|
||||
if not encoding or 'ascii' in encoding.lower():
|
||||
encoding = sys.getdefaultencoding()
|
||||
|
||||
# GH3360, save the reported defencoding at import time
|
||||
# MPL backends may change it. Make available for debugging.
|
||||
if not _initial_defencoding:
|
||||
_initial_defencoding = sys.getdefaultencoding()
|
||||
|
||||
return encoding
|
||||
|
||||
|
||||
def get_console_size():
|
||||
"""Return console size as tuple = (width, height).
|
||||
|
||||
Returns (None,None) in non-interactive session.
|
||||
"""
|
||||
from pandas import get_option
|
||||
from pandas.core import common as com
|
||||
|
||||
display_width = get_option('display.width')
|
||||
# deprecated.
|
||||
display_height = get_option('display.max_rows')
|
||||
|
||||
# Consider
|
||||
# interactive shell terminal, can detect term size
|
||||
# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
|
||||
# size non-interactive script, should disregard term size
|
||||
|
||||
# in addition
|
||||
# width,height have default values, but setting to 'None' signals
|
||||
# should use Auto-Detection, But only in interactive shell-terminal.
|
||||
# Simple. yeah.
|
||||
|
||||
if com.in_interactive_session():
|
||||
if com.in_ipython_frontend():
|
||||
# sane defaults for interactive non-shell terminal
|
||||
# match default for width,height in config_init
|
||||
from pandas.core.config import get_default_val
|
||||
terminal_width = get_default_val('display.width')
|
||||
terminal_height = get_default_val('display.max_rows')
|
||||
else:
|
||||
# pure terminal
|
||||
terminal_width, terminal_height = get_terminal_size()
|
||||
else:
|
||||
terminal_width, terminal_height = None, None
|
||||
|
||||
# Note if the User sets width/Height to None (auto-detection)
|
||||
# and we're in a script (non-inter), this will return (None,None)
|
||||
# caller needs to deal.
|
||||
return (display_width or terminal_width, display_height or terminal_height)
|
||||
@@ -1,250 +0,0 @@
|
||||
"""Utilities for interpreting CSS from Stylers for formatting non-HTML outputs
|
||||
"""
|
||||
|
||||
import re
|
||||
import warnings
|
||||
|
||||
|
||||
class CSSWarning(UserWarning):
|
||||
"""This CSS syntax cannot currently be parsed"""
|
||||
pass
|
||||
|
||||
|
||||
class CSSResolver(object):
|
||||
"""A callable for parsing and resolving CSS to atomic properties
|
||||
|
||||
"""
|
||||
|
||||
INITIAL_STYLE = {
|
||||
}
|
||||
|
||||
def __call__(self, declarations_str, inherited=None):
|
||||
""" the given declarations to atomic properties
|
||||
|
||||
Parameters
|
||||
----------
|
||||
declarations_str : str
|
||||
A list of CSS declarations
|
||||
inherited : dict, optional
|
||||
Atomic properties indicating the inherited style context in which
|
||||
declarations_str is to be resolved. ``inherited`` should already
|
||||
be resolved, i.e. valid output of this method.
|
||||
|
||||
Returns
|
||||
-------
|
||||
props : dict
|
||||
Atomic CSS 2.2 properties
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> resolve = CSSResolver()
|
||||
>>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
|
||||
>>> out = resolve('''
|
||||
... border-color: BLUE RED;
|
||||
... font-size: 1em;
|
||||
... font-size: 2em;
|
||||
... font-weight: normal;
|
||||
... font-weight: inherit;
|
||||
... ''', inherited)
|
||||
>>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
|
||||
[('border-bottom-color', 'blue'),
|
||||
('border-left-color', 'red'),
|
||||
('border-right-color', 'red'),
|
||||
('border-top-color', 'blue'),
|
||||
('font-family', 'serif'),
|
||||
('font-size', '24pt'),
|
||||
('font-weight', 'bold')]
|
||||
"""
|
||||
|
||||
props = dict(self.atomize(self.parse(declarations_str)))
|
||||
if inherited is None:
|
||||
inherited = {}
|
||||
|
||||
# 1. resolve inherited, initial
|
||||
for prop, val in inherited.items():
|
||||
if prop not in props:
|
||||
props[prop] = val
|
||||
|
||||
for prop, val in list(props.items()):
|
||||
if val == 'inherit':
|
||||
val = inherited.get(prop, 'initial')
|
||||
if val == 'initial':
|
||||
val = self.INITIAL_STYLE.get(prop)
|
||||
|
||||
if val is None:
|
||||
# we do not define a complete initial stylesheet
|
||||
del props[prop]
|
||||
else:
|
||||
props[prop] = val
|
||||
|
||||
# 2. resolve relative font size
|
||||
if props.get('font-size'):
|
||||
if 'font-size' in inherited:
|
||||
em_pt = inherited['font-size']
|
||||
assert em_pt[-2:] == 'pt'
|
||||
em_pt = float(em_pt[:-2])
|
||||
else:
|
||||
em_pt = None
|
||||
props['font-size'] = self.size_to_pt(
|
||||
props['font-size'], em_pt, conversions=self.FONT_SIZE_RATIOS)
|
||||
|
||||
font_size = float(props['font-size'][:-2])
|
||||
else:
|
||||
font_size = None
|
||||
|
||||
# 3. TODO: resolve other font-relative units
|
||||
for side in self.SIDES:
|
||||
prop = 'border-{side}-width'.format(side=side)
|
||||
if prop in props:
|
||||
props[prop] = self.size_to_pt(
|
||||
props[prop], em_pt=font_size,
|
||||
conversions=self.BORDER_WIDTH_RATIOS)
|
||||
for prop in ['margin-{side}'.format(side=side),
|
||||
'padding-{side}'.format(side=side)]:
|
||||
if prop in props:
|
||||
# TODO: support %
|
||||
props[prop] = self.size_to_pt(
|
||||
props[prop], em_pt=font_size,
|
||||
conversions=self.MARGIN_RATIOS)
|
||||
|
||||
return props
|
||||
|
||||
UNIT_RATIOS = {
|
||||
'rem': ('pt', 12),
|
||||
'ex': ('em', .5),
|
||||
# 'ch':
|
||||
'px': ('pt', .75),
|
||||
'pc': ('pt', 12),
|
||||
'in': ('pt', 72),
|
||||
'cm': ('in', 1 / 2.54),
|
||||
'mm': ('in', 1 / 25.4),
|
||||
'q': ('mm', .25),
|
||||
'!!default': ('em', 0),
|
||||
}
|
||||
|
||||
FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
|
||||
FONT_SIZE_RATIOS.update({
|
||||
'%': ('em', .01),
|
||||
'xx-small': ('rem', .5),
|
||||
'x-small': ('rem', .625),
|
||||
'small': ('rem', .8),
|
||||
'medium': ('rem', 1),
|
||||
'large': ('rem', 1.125),
|
||||
'x-large': ('rem', 1.5),
|
||||
'xx-large': ('rem', 2),
|
||||
'smaller': ('em', 1 / 1.2),
|
||||
'larger': ('em', 1.2),
|
||||
'!!default': ('em', 1),
|
||||
})
|
||||
|
||||
MARGIN_RATIOS = UNIT_RATIOS.copy()
|
||||
MARGIN_RATIOS.update({
|
||||
'none': ('pt', 0),
|
||||
})
|
||||
|
||||
BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
|
||||
BORDER_WIDTH_RATIOS.update({
|
||||
'none': ('pt', 0),
|
||||
'thick': ('px', 4),
|
||||
'medium': ('px', 2),
|
||||
'thin': ('px', 1),
|
||||
# Default: medium only if solid
|
||||
})
|
||||
|
||||
def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS):
|
||||
def _error():
|
||||
warnings.warn('Unhandled size: {val!r}'.format(val=in_val),
|
||||
CSSWarning)
|
||||
return self.size_to_pt('1!!default', conversions=conversions)
|
||||
|
||||
try:
|
||||
val, unit = re.match(r'^(\S*?)([a-zA-Z%!].*)', in_val).groups()
|
||||
except AttributeError:
|
||||
return _error()
|
||||
if val == '':
|
||||
# hack for 'large' etc.
|
||||
val = 1
|
||||
else:
|
||||
try:
|
||||
val = float(val)
|
||||
except ValueError:
|
||||
return _error()
|
||||
|
||||
while unit != 'pt':
|
||||
if unit == 'em':
|
||||
if em_pt is None:
|
||||
unit = 'rem'
|
||||
else:
|
||||
val *= em_pt
|
||||
unit = 'pt'
|
||||
continue
|
||||
|
||||
try:
|
||||
unit, mul = conversions[unit]
|
||||
except KeyError:
|
||||
return _error()
|
||||
val *= mul
|
||||
|
||||
val = round(val, 5)
|
||||
if int(val) == val:
|
||||
size_fmt = '{fmt:d}pt'.format(fmt=int(val))
|
||||
else:
|
||||
size_fmt = '{fmt:f}pt'.format(fmt=val)
|
||||
return size_fmt
|
||||
|
||||
def atomize(self, declarations):
|
||||
for prop, value in declarations:
|
||||
attr = 'expand_' + prop.replace('-', '_')
|
||||
try:
|
||||
expand = getattr(self, attr)
|
||||
except AttributeError:
|
||||
yield prop, value
|
||||
else:
|
||||
for prop, value in expand(prop, value):
|
||||
yield prop, value
|
||||
|
||||
SIDE_SHORTHANDS = {
|
||||
1: [0, 0, 0, 0],
|
||||
2: [0, 1, 0, 1],
|
||||
3: [0, 1, 2, 1],
|
||||
4: [0, 1, 2, 3],
|
||||
}
|
||||
SIDES = ('top', 'right', 'bottom', 'left')
|
||||
|
||||
def _side_expander(prop_fmt):
|
||||
def expand(self, prop, value):
|
||||
tokens = value.split()
|
||||
try:
|
||||
mapping = self.SIDE_SHORTHANDS[len(tokens)]
|
||||
except KeyError:
|
||||
warnings.warn('Could not expand "{prop}: {val}"'
|
||||
.format(prop=prop, val=value), CSSWarning)
|
||||
return
|
||||
for key, idx in zip(self.SIDES, mapping):
|
||||
yield prop_fmt.format(key), tokens[idx]
|
||||
|
||||
return expand
|
||||
|
||||
expand_border_color = _side_expander('border-{:s}-color')
|
||||
expand_border_style = _side_expander('border-{:s}-style')
|
||||
expand_border_width = _side_expander('border-{:s}-width')
|
||||
expand_margin = _side_expander('margin-{:s}')
|
||||
expand_padding = _side_expander('padding-{:s}')
|
||||
|
||||
def parse(self, declarations_str):
|
||||
"""Generates (prop, value) pairs from declarations
|
||||
|
||||
In a future version may generate parsed tokens from tinycss/tinycss2
|
||||
"""
|
||||
for decl in declarations_str.split(';'):
|
||||
if not decl.strip():
|
||||
continue
|
||||
prop, sep, val = decl.partition(':')
|
||||
prop = prop.strip().lower()
|
||||
# TODO: don't lowercase case sensitive parts of values (strings)
|
||||
val = val.strip().lower()
|
||||
if sep:
|
||||
yield prop, val
|
||||
else:
|
||||
warnings.warn('Ill-formatted attribute: expected a colon '
|
||||
'in {decl!r}'.format(decl=decl), CSSWarning)
|
||||
@@ -1,313 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Module for formatting output data into CSV files.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import warnings
|
||||
|
||||
import csv as csvlib
|
||||
from zipfile import ZipFile
|
||||
import numpy as np
|
||||
|
||||
from pandas.core.dtypes.missing import notna
|
||||
from pandas.core.index import Index, MultiIndex
|
||||
from pandas import compat
|
||||
from pandas.compat import (StringIO, range, zip)
|
||||
|
||||
from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user,
|
||||
_stringify_path)
|
||||
from pandas._libs import writers as libwriters
|
||||
from pandas.core.indexes.datetimes import DatetimeIndex
|
||||
from pandas.core.indexes.period import PeriodIndex
|
||||
|
||||
|
||||
class CSVFormatter(object):
|
||||
|
||||
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
|
||||
float_format=None, cols=None, header=True, index=True,
|
||||
index_label=None, mode='w', nanRep=None, encoding=None,
|
||||
compression=None, quoting=None, line_terminator='\n',
|
||||
chunksize=None, tupleize_cols=False, quotechar='"',
|
||||
date_format=None, doublequote=True, escapechar=None,
|
||||
decimal='.'):
|
||||
|
||||
self.obj = obj
|
||||
|
||||
if path_or_buf is None:
|
||||
path_or_buf = StringIO()
|
||||
|
||||
self.path_or_buf = _expand_user(_stringify_path(path_or_buf))
|
||||
self.sep = sep
|
||||
self.na_rep = na_rep
|
||||
self.float_format = float_format
|
||||
self.decimal = decimal
|
||||
|
||||
self.header = header
|
||||
self.index = index
|
||||
self.index_label = index_label
|
||||
self.mode = mode
|
||||
self.encoding = encoding
|
||||
self.compression = compression
|
||||
|
||||
if quoting is None:
|
||||
quoting = csvlib.QUOTE_MINIMAL
|
||||
self.quoting = quoting
|
||||
|
||||
if quoting == csvlib.QUOTE_NONE:
|
||||
# prevents crash in _csv
|
||||
quotechar = None
|
||||
self.quotechar = quotechar
|
||||
|
||||
self.doublequote = doublequote
|
||||
self.escapechar = escapechar
|
||||
|
||||
self.line_terminator = line_terminator
|
||||
|
||||
self.date_format = date_format
|
||||
|
||||
self.tupleize_cols = tupleize_cols
|
||||
self.has_mi_columns = (isinstance(obj.columns, MultiIndex) and
|
||||
not self.tupleize_cols)
|
||||
|
||||
# validate mi options
|
||||
if self.has_mi_columns:
|
||||
if cols is not None:
|
||||
raise TypeError("cannot specify cols with a MultiIndex on the "
|
||||
"columns")
|
||||
|
||||
if cols is not None:
|
||||
if isinstance(cols, Index):
|
||||
cols = cols.to_native_types(na_rep=na_rep,
|
||||
float_format=float_format,
|
||||
date_format=date_format,
|
||||
quoting=self.quoting)
|
||||
else:
|
||||
cols = list(cols)
|
||||
self.obj = self.obj.loc[:, cols]
|
||||
|
||||
# update columns to include possible multiplicity of dupes
|
||||
# and make sure sure cols is just a list of labels
|
||||
cols = self.obj.columns
|
||||
if isinstance(cols, Index):
|
||||
cols = cols.to_native_types(na_rep=na_rep,
|
||||
float_format=float_format,
|
||||
date_format=date_format,
|
||||
quoting=self.quoting)
|
||||
else:
|
||||
cols = list(cols)
|
||||
|
||||
# save it
|
||||
self.cols = cols
|
||||
|
||||
# preallocate data 2d list
|
||||
self.blocks = self.obj._data.blocks
|
||||
ncols = sum(b.shape[0] for b in self.blocks)
|
||||
self.data = [None] * ncols
|
||||
|
||||
if chunksize is None:
|
||||
chunksize = (100000 // (len(self.cols) or 1)) or 1
|
||||
self.chunksize = int(chunksize)
|
||||
|
||||
self.data_index = obj.index
|
||||
if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and
|
||||
date_format is not None):
|
||||
self.data_index = Index([x.strftime(date_format) if notna(x) else
|
||||
'' for x in self.data_index])
|
||||
|
||||
self.nlevels = getattr(self.data_index, 'nlevels', 1)
|
||||
if not index:
|
||||
self.nlevels = 0
|
||||
|
||||
def save(self):
|
||||
# create the writer & save
|
||||
if self.encoding is None:
|
||||
if compat.PY2:
|
||||
encoding = 'ascii'
|
||||
else:
|
||||
encoding = 'utf-8'
|
||||
else:
|
||||
encoding = self.encoding
|
||||
|
||||
# GH 21227 internal compression is not used when file-like passed.
|
||||
if self.compression and hasattr(self.path_or_buf, 'write'):
|
||||
msg = ("compression has no effect when passing file-like "
|
||||
"object as input.")
|
||||
warnings.warn(msg, RuntimeWarning, stacklevel=2)
|
||||
|
||||
# when zip compression is called.
|
||||
is_zip = isinstance(self.path_or_buf, ZipFile) or (
|
||||
not hasattr(self.path_or_buf, 'write')
|
||||
and self.compression == 'zip')
|
||||
|
||||
if is_zip:
|
||||
# zipfile doesn't support writing string to archive. uses string
|
||||
# buffer to receive csv writing and dump into zip compression
|
||||
# file handle. GH 21241, 21118
|
||||
f = StringIO()
|
||||
close = False
|
||||
elif hasattr(self.path_or_buf, 'write'):
|
||||
f = self.path_or_buf
|
||||
close = False
|
||||
else:
|
||||
f, handles = _get_handle(self.path_or_buf, self.mode,
|
||||
encoding=encoding,
|
||||
compression=self.compression)
|
||||
close = True
|
||||
|
||||
try:
|
||||
writer_kwargs = dict(lineterminator=self.line_terminator,
|
||||
delimiter=self.sep, quoting=self.quoting,
|
||||
doublequote=self.doublequote,
|
||||
escapechar=self.escapechar,
|
||||
quotechar=self.quotechar)
|
||||
if encoding == 'ascii':
|
||||
self.writer = csvlib.writer(f, **writer_kwargs)
|
||||
else:
|
||||
writer_kwargs['encoding'] = encoding
|
||||
self.writer = UnicodeWriter(f, **writer_kwargs)
|
||||
|
||||
self._save()
|
||||
|
||||
finally:
|
||||
if is_zip:
|
||||
# GH 17778 handles zip compression separately.
|
||||
buf = f.getvalue()
|
||||
if hasattr(self.path_or_buf, 'write'):
|
||||
self.path_or_buf.write(buf)
|
||||
else:
|
||||
f, handles = _get_handle(self.path_or_buf, self.mode,
|
||||
encoding=encoding,
|
||||
compression=self.compression)
|
||||
f.write(buf)
|
||||
close = True
|
||||
if close:
|
||||
f.close()
|
||||
for _fh in handles:
|
||||
_fh.close()
|
||||
|
||||
def _save_header(self):
|
||||
|
||||
writer = self.writer
|
||||
obj = self.obj
|
||||
index_label = self.index_label
|
||||
cols = self.cols
|
||||
has_mi_columns = self.has_mi_columns
|
||||
header = self.header
|
||||
encoded_labels = []
|
||||
|
||||
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
|
||||
if not (has_aliases or self.header):
|
||||
return
|
||||
if has_aliases:
|
||||
if len(header) != len(cols):
|
||||
raise ValueError(('Writing {ncols} cols but got {nalias} '
|
||||
'aliases'.format(ncols=len(cols),
|
||||
nalias=len(header))))
|
||||
else:
|
||||
write_cols = header
|
||||
else:
|
||||
write_cols = cols
|
||||
|
||||
if self.index:
|
||||
# should write something for index label
|
||||
if index_label is not False:
|
||||
if index_label is None:
|
||||
if isinstance(obj.index, MultiIndex):
|
||||
index_label = []
|
||||
for i, name in enumerate(obj.index.names):
|
||||
if name is None:
|
||||
name = ''
|
||||
index_label.append(name)
|
||||
else:
|
||||
index_label = obj.index.name
|
||||
if index_label is None:
|
||||
index_label = ['']
|
||||
else:
|
||||
index_label = [index_label]
|
||||
elif not isinstance(index_label,
|
||||
(list, tuple, np.ndarray, Index)):
|
||||
# given a string for a DF with Index
|
||||
index_label = [index_label]
|
||||
|
||||
encoded_labels = list(index_label)
|
||||
else:
|
||||
encoded_labels = []
|
||||
|
||||
if not has_mi_columns or has_aliases:
|
||||
encoded_labels += list(write_cols)
|
||||
writer.writerow(encoded_labels)
|
||||
else:
|
||||
# write out the mi
|
||||
columns = obj.columns
|
||||
|
||||
# write out the names for each level, then ALL of the values for
|
||||
# each level
|
||||
for i in range(columns.nlevels):
|
||||
|
||||
# we need at least 1 index column to write our col names
|
||||
col_line = []
|
||||
if self.index:
|
||||
|
||||
# name is the first column
|
||||
col_line.append(columns.names[i])
|
||||
|
||||
if isinstance(index_label, list) and len(index_label) > 1:
|
||||
col_line.extend([''] * (len(index_label) - 1))
|
||||
|
||||
col_line.extend(columns._get_level_values(i))
|
||||
|
||||
writer.writerow(col_line)
|
||||
|
||||
# Write out the index line if it's not empty.
|
||||
# Otherwise, we will print out an extraneous
|
||||
# blank line between the mi and the data rows.
|
||||
if encoded_labels and set(encoded_labels) != set(['']):
|
||||
encoded_labels.extend([''] * len(columns))
|
||||
writer.writerow(encoded_labels)
|
||||
|
||||
def _save(self):
|
||||
|
||||
self._save_header()
|
||||
|
||||
nrows = len(self.data_index)
|
||||
|
||||
# write in chunksize bites
|
||||
chunksize = self.chunksize
|
||||
chunks = int(nrows / chunksize) + 1
|
||||
|
||||
for i in range(chunks):
|
||||
start_i = i * chunksize
|
||||
end_i = min((i + 1) * chunksize, nrows)
|
||||
if start_i >= end_i:
|
||||
break
|
||||
|
||||
self._save_chunk(start_i, end_i)
|
||||
|
||||
def _save_chunk(self, start_i, end_i):
|
||||
|
||||
data_index = self.data_index
|
||||
|
||||
# create the data for a chunk
|
||||
slicer = slice(start_i, end_i)
|
||||
for i in range(len(self.blocks)):
|
||||
b = self.blocks[i]
|
||||
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
|
||||
float_format=self.float_format,
|
||||
decimal=self.decimal,
|
||||
date_format=self.date_format,
|
||||
quoting=self.quoting)
|
||||
|
||||
for col_loc, col in zip(b.mgr_locs, d):
|
||||
# self.data is a preallocated list
|
||||
self.data[col_loc] = col
|
||||
|
||||
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
|
||||
float_format=self.float_format,
|
||||
decimal=self.decimal,
|
||||
date_format=self.date_format,
|
||||
quoting=self.quoting)
|
||||
|
||||
libwriters.write_csv_rows(self.data, ix, self.nlevels,
|
||||
self.cols, self.writer)
|
||||
@@ -1,654 +0,0 @@
|
||||
"""Utilities for conversion to writer-agnostic Excel representation
|
||||
"""
|
||||
|
||||
import re
|
||||
import warnings
|
||||
import itertools
|
||||
|
||||
import numpy as np
|
||||
|
||||
from pandas.compat import reduce
|
||||
from pandas.io.formats.css import CSSResolver, CSSWarning
|
||||
from pandas.io.formats.printing import pprint_thing
|
||||
import pandas.core.common as com
|
||||
from pandas.core.dtypes.common import is_float, is_scalar
|
||||
from pandas.core.dtypes import missing
|
||||
from pandas import Index, MultiIndex, PeriodIndex
|
||||
from pandas.io.formats.format import get_level_lengths
|
||||
|
||||
|
||||
class ExcelCell(object):
|
||||
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
|
||||
__slots__ = __fields__
|
||||
|
||||
def __init__(self, row, col, val, style=None, mergestart=None,
|
||||
mergeend=None):
|
||||
self.row = row
|
||||
self.col = col
|
||||
self.val = val
|
||||
self.style = style
|
||||
self.mergestart = mergestart
|
||||
self.mergeend = mergeend
|
||||
|
||||
|
||||
header_style = {"font": {"bold": True},
|
||||
"borders": {"top": "thin",
|
||||
"right": "thin",
|
||||
"bottom": "thin",
|
||||
"left": "thin"},
|
||||
"alignment": {"horizontal": "center",
|
||||
"vertical": "top"}}
|
||||
|
||||
|
||||
class CSSToExcelConverter(object):
|
||||
"""A callable for converting CSS declarations to ExcelWriter styles
|
||||
|
||||
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
|
||||
focusing on font styling, backgrounds, borders and alignment.
|
||||
|
||||
Operates by first computing CSS styles in a fairly generic
|
||||
way (see :meth:`compute_css`) then determining Excel style
|
||||
properties from CSS properties (see :meth:`build_xlstyle`).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
inherited : str, optional
|
||||
CSS declarations understood to be the containing scope for the
|
||||
CSS processed by :meth:`__call__`.
|
||||
"""
|
||||
# NB: Most of the methods here could be classmethods, as only __init__
|
||||
# and __call__ make use of instance attributes. We leave them as
|
||||
# instancemethods so that users can easily experiment with extensions
|
||||
# without monkey-patching.
|
||||
|
||||
def __init__(self, inherited=None):
|
||||
if inherited is not None:
|
||||
inherited = self.compute_css(inherited,
|
||||
self.compute_css.INITIAL_STYLE)
|
||||
|
||||
self.inherited = inherited
|
||||
|
||||
compute_css = CSSResolver()
|
||||
|
||||
def __call__(self, declarations_str):
|
||||
"""Convert CSS declarations to ExcelWriter style
|
||||
|
||||
Parameters
|
||||
----------
|
||||
declarations_str : str
|
||||
List of CSS declarations.
|
||||
e.g. "font-weight: bold; background: blue"
|
||||
|
||||
Returns
|
||||
-------
|
||||
xlstyle : dict
|
||||
A style as interpreted by ExcelWriter when found in
|
||||
ExcelCell.style.
|
||||
"""
|
||||
# TODO: memoize?
|
||||
properties = self.compute_css(declarations_str, self.inherited)
|
||||
return self.build_xlstyle(properties)
|
||||
|
||||
def build_xlstyle(self, props):
|
||||
out = {
|
||||
'alignment': self.build_alignment(props),
|
||||
'border': self.build_border(props),
|
||||
'fill': self.build_fill(props),
|
||||
'font': self.build_font(props),
|
||||
}
|
||||
# TODO: support number format
|
||||
# TODO: handle cell width and height: needs support in pandas.io.excel
|
||||
|
||||
def remove_none(d):
|
||||
"""Remove key where value is None, through nested dicts"""
|
||||
for k, v in list(d.items()):
|
||||
if v is None:
|
||||
del d[k]
|
||||
elif isinstance(v, dict):
|
||||
remove_none(v)
|
||||
if not v:
|
||||
del d[k]
|
||||
|
||||
remove_none(out)
|
||||
return out
|
||||
|
||||
VERTICAL_MAP = {
|
||||
'top': 'top',
|
||||
'text-top': 'top',
|
||||
'middle': 'center',
|
||||
'baseline': 'bottom',
|
||||
'bottom': 'bottom',
|
||||
'text-bottom': 'bottom',
|
||||
# OpenXML also has 'justify', 'distributed'
|
||||
}
|
||||
|
||||
def build_alignment(self, props):
|
||||
# TODO: text-indent, padding-left -> alignment.indent
|
||||
return {'horizontal': props.get('text-align'),
|
||||
'vertical': self.VERTICAL_MAP.get(props.get('vertical-align')),
|
||||
'wrap_text': (None if props.get('white-space') is None else
|
||||
props['white-space'] not in
|
||||
('nowrap', 'pre', 'pre-line'))
|
||||
}
|
||||
|
||||
def build_border(self, props):
|
||||
return {side: {
|
||||
'style': self._border_style(props.get('border-{side}-style'
|
||||
.format(side=side)),
|
||||
props.get('border-{side}-width'
|
||||
.format(side=side))),
|
||||
'color': self.color_to_excel(
|
||||
props.get('border-{side}-color'.format(side=side))),
|
||||
} for side in ['top', 'right', 'bottom', 'left']}
|
||||
|
||||
def _border_style(self, style, width):
|
||||
# convert styles and widths to openxml, one of:
|
||||
# 'dashDot'
|
||||
# 'dashDotDot'
|
||||
# 'dashed'
|
||||
# 'dotted'
|
||||
# 'double'
|
||||
# 'hair'
|
||||
# 'medium'
|
||||
# 'mediumDashDot'
|
||||
# 'mediumDashDotDot'
|
||||
# 'mediumDashed'
|
||||
# 'slantDashDot'
|
||||
# 'thick'
|
||||
# 'thin'
|
||||
if width is None and style is None:
|
||||
return None
|
||||
if style == 'none' or style == 'hidden':
|
||||
return None
|
||||
|
||||
if width is None:
|
||||
width = '2pt'
|
||||
width = float(width[:-2])
|
||||
if width < 1e-5:
|
||||
return None
|
||||
elif width < 1.3:
|
||||
width_name = 'thin'
|
||||
elif width < 2.8:
|
||||
width_name = 'medium'
|
||||
else:
|
||||
width_name = 'thick'
|
||||
|
||||
if style in (None, 'groove', 'ridge', 'inset', 'outset'):
|
||||
# not handled
|
||||
style = 'solid'
|
||||
|
||||
if style == 'double':
|
||||
return 'double'
|
||||
if style == 'solid':
|
||||
return width_name
|
||||
if style == 'dotted':
|
||||
if width_name in ('hair', 'thin'):
|
||||
return 'dotted'
|
||||
return 'mediumDashDotDot'
|
||||
if style == 'dashed':
|
||||
if width_name in ('hair', 'thin'):
|
||||
return 'dashed'
|
||||
return 'mediumDashed'
|
||||
|
||||
def build_fill(self, props):
|
||||
# TODO: perhaps allow for special properties
|
||||
# -excel-pattern-bgcolor and -excel-pattern-type
|
||||
fill_color = props.get('background-color')
|
||||
if fill_color not in (None, 'transparent', 'none'):
|
||||
return {
|
||||
'fgColor': self.color_to_excel(fill_color),
|
||||
'patternType': 'solid',
|
||||
}
|
||||
|
||||
BOLD_MAP = {'bold': True, 'bolder': True, '600': True, '700': True,
|
||||
'800': True, '900': True,
|
||||
'normal': False, 'lighter': False, '100': False, '200': False,
|
||||
'300': False, '400': False, '500': False}
|
||||
ITALIC_MAP = {'normal': False, 'italic': True, 'oblique': True}
|
||||
|
||||
def build_font(self, props):
|
||||
size = props.get('font-size')
|
||||
if size is not None:
|
||||
assert size.endswith('pt')
|
||||
size = float(size[:-2])
|
||||
|
||||
font_names_tmp = re.findall(r'''(?x)
|
||||
(
|
||||
"(?:[^"]|\\")+"
|
||||
|
|
||||
'(?:[^']|\\')+'
|
||||
|
|
||||
[^'",]+
|
||||
)(?=,|\s*$)
|
||||
''', props.get('font-family', ''))
|
||||
font_names = []
|
||||
for name in font_names_tmp:
|
||||
if name[:1] == '"':
|
||||
name = name[1:-1].replace('\\"', '"')
|
||||
elif name[:1] == '\'':
|
||||
name = name[1:-1].replace('\\\'', '\'')
|
||||
else:
|
||||
name = name.strip()
|
||||
if name:
|
||||
font_names.append(name)
|
||||
|
||||
family = None
|
||||
for name in font_names:
|
||||
if name == 'serif':
|
||||
family = 1 # roman
|
||||
break
|
||||
elif name == 'sans-serif':
|
||||
family = 2 # swiss
|
||||
break
|
||||
elif name == 'cursive':
|
||||
family = 4 # script
|
||||
break
|
||||
elif name == 'fantasy':
|
||||
family = 5 # decorative
|
||||
break
|
||||
|
||||
decoration = props.get('text-decoration')
|
||||
if decoration is not None:
|
||||
decoration = decoration.split()
|
||||
else:
|
||||
decoration = ()
|
||||
|
||||
return {
|
||||
'name': font_names[0] if font_names else None,
|
||||
'family': family,
|
||||
'size': size,
|
||||
'bold': self.BOLD_MAP.get(props.get('font-weight')),
|
||||
'italic': self.ITALIC_MAP.get(props.get('font-style')),
|
||||
'underline': ('single' if
|
||||
'underline' in decoration
|
||||
else None),
|
||||
'strike': ('line-through' in decoration) or None,
|
||||
'color': self.color_to_excel(props.get('color')),
|
||||
# shadow if nonzero digit before shadow color
|
||||
'shadow': (bool(re.search('^[^#(]*[1-9]',
|
||||
props['text-shadow']))
|
||||
if 'text-shadow' in props else None),
|
||||
# 'vertAlign':,
|
||||
# 'charset': ,
|
||||
# 'scheme': ,
|
||||
# 'outline': ,
|
||||
# 'condense': ,
|
||||
}
|
||||
|
||||
NAMED_COLORS = {
|
||||
'maroon': '800000',
|
||||
'brown': 'A52A2A',
|
||||
'red': 'FF0000',
|
||||
'pink': 'FFC0CB',
|
||||
'orange': 'FFA500',
|
||||
'yellow': 'FFFF00',
|
||||
'olive': '808000',
|
||||
'green': '008000',
|
||||
'purple': '800080',
|
||||
'fuchsia': 'FF00FF',
|
||||
'lime': '00FF00',
|
||||
'teal': '008080',
|
||||
'aqua': '00FFFF',
|
||||
'blue': '0000FF',
|
||||
'navy': '000080',
|
||||
'black': '000000',
|
||||
'gray': '808080',
|
||||
'grey': '808080',
|
||||
'silver': 'C0C0C0',
|
||||
'white': 'FFFFFF',
|
||||
}
|
||||
|
||||
def color_to_excel(self, val):
|
||||
if val is None:
|
||||
return None
|
||||
if val.startswith('#') and len(val) == 7:
|
||||
return val[1:].upper()
|
||||
if val.startswith('#') and len(val) == 4:
|
||||
return (val[1] * 2 + val[2] * 2 + val[3] * 2).upper()
|
||||
try:
|
||||
return self.NAMED_COLORS[val]
|
||||
except KeyError:
|
||||
warnings.warn('Unhandled color format: {val!r}'.format(val=val),
|
||||
CSSWarning)
|
||||
|
||||
|
||||
class ExcelFormatter(object):
|
||||
"""
|
||||
Class for formatting a DataFrame to a list of ExcelCells,
|
||||
|
||||
Parameters
|
||||
----------
|
||||
df : DataFrame or Styler
|
||||
na_rep: na representation
|
||||
float_format : string, default None
|
||||
Format string for floating point numbers
|
||||
cols : sequence, optional
|
||||
Columns to write
|
||||
header : boolean or list of string, default True
|
||||
Write out column names. If a list of string is given it is
|
||||
assumed to be aliases for the column names
|
||||
index : boolean, default True
|
||||
output row names (index)
|
||||
index_label : string or sequence, default None
|
||||
Column label for index column(s) if desired. If None is given, and
|
||||
`header` and `index` are True, then the index names are used. A
|
||||
sequence should be given if the DataFrame uses MultiIndex.
|
||||
merge_cells : boolean, default False
|
||||
Format MultiIndex and Hierarchical Rows as merged cells.
|
||||
inf_rep : string, default `'inf'`
|
||||
representation for np.inf values (which aren't representable in Excel)
|
||||
A `'-'` sign will be added in front of -inf.
|
||||
style_converter : callable, optional
|
||||
This translates Styler styles (CSS) into ExcelWriter styles.
|
||||
Defaults to ``CSSToExcelConverter()``.
|
||||
It should have signature css_declarations string -> excel style.
|
||||
This is only called for body cells.
|
||||
"""
|
||||
|
||||
def __init__(self, df, na_rep='', float_format=None, cols=None,
|
||||
header=True, index=True, index_label=None, merge_cells=False,
|
||||
inf_rep='inf', style_converter=None):
|
||||
self.rowcounter = 0
|
||||
self.na_rep = na_rep
|
||||
if hasattr(df, 'render'):
|
||||
self.styler = df
|
||||
df = df.data
|
||||
if style_converter is None:
|
||||
style_converter = CSSToExcelConverter()
|
||||
self.style_converter = style_converter
|
||||
else:
|
||||
self.styler = None
|
||||
self.df = df
|
||||
if cols is not None:
|
||||
|
||||
# all missing, raise
|
||||
if not len(Index(cols) & df.columns):
|
||||
raise KeyError(
|
||||
"passes columns are not ALL present dataframe")
|
||||
|
||||
# deprecatedin gh-17295
|
||||
# 1 missing is ok (for now)
|
||||
if len(Index(cols) & df.columns) != len(cols):
|
||||
warnings.warn(
|
||||
"Not all names specified in 'columns' are found; "
|
||||
"this will raise a KeyError in the future",
|
||||
FutureWarning)
|
||||
|
||||
self.df = df.reindex(columns=cols)
|
||||
self.columns = self.df.columns
|
||||
self.float_format = float_format
|
||||
self.index = index
|
||||
self.index_label = index_label
|
||||
self.header = header
|
||||
self.merge_cells = merge_cells
|
||||
self.inf_rep = inf_rep
|
||||
|
||||
def _format_value(self, val):
|
||||
if is_scalar(val) and missing.isna(val):
|
||||
val = self.na_rep
|
||||
elif is_float(val):
|
||||
if missing.isposinf_scalar(val):
|
||||
val = self.inf_rep
|
||||
elif missing.isneginf_scalar(val):
|
||||
val = '-{inf}'.format(inf=self.inf_rep)
|
||||
elif self.float_format is not None:
|
||||
val = float(self.float_format % val)
|
||||
return val
|
||||
|
||||
def _format_header_mi(self):
|
||||
if self.columns.nlevels > 1:
|
||||
if not self.index:
|
||||
raise NotImplementedError("Writing to Excel with MultiIndex"
|
||||
" columns and no index "
|
||||
"('index'=False) is not yet "
|
||||
"implemented.")
|
||||
|
||||
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
|
||||
if not (has_aliases or self.header):
|
||||
return
|
||||
|
||||
columns = self.columns
|
||||
level_strs = columns.format(sparsify=self.merge_cells, adjoin=False,
|
||||
names=False)
|
||||
level_lengths = get_level_lengths(level_strs)
|
||||
coloffset = 0
|
||||
lnum = 0
|
||||
|
||||
if self.index and isinstance(self.df.index, MultiIndex):
|
||||
coloffset = len(self.df.index[0]) - 1
|
||||
|
||||
if self.merge_cells:
|
||||
# Format multi-index as a merged cells.
|
||||
for lnum in range(len(level_lengths)):
|
||||
name = columns.names[lnum]
|
||||
yield ExcelCell(lnum, coloffset, name, header_style)
|
||||
|
||||
for lnum, (spans, levels, labels) in enumerate(zip(
|
||||
level_lengths, columns.levels, columns.labels)):
|
||||
values = levels.take(labels)
|
||||
for i in spans:
|
||||
if spans[i] > 1:
|
||||
yield ExcelCell(lnum, coloffset + i + 1, values[i],
|
||||
header_style, lnum,
|
||||
coloffset + i + spans[i])
|
||||
else:
|
||||
yield ExcelCell(lnum, coloffset + i + 1, values[i],
|
||||
header_style)
|
||||
else:
|
||||
# Format in legacy format with dots to indicate levels.
|
||||
for i, values in enumerate(zip(*level_strs)):
|
||||
v = ".".join(map(pprint_thing, values))
|
||||
yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
|
||||
|
||||
self.rowcounter = lnum
|
||||
|
||||
def _format_header_regular(self):
|
||||
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
|
||||
if has_aliases or self.header:
|
||||
coloffset = 0
|
||||
|
||||
if self.index:
|
||||
coloffset = 1
|
||||
if isinstance(self.df.index, MultiIndex):
|
||||
coloffset = len(self.df.index[0])
|
||||
|
||||
colnames = self.columns
|
||||
if has_aliases:
|
||||
if len(self.header) != len(self.columns):
|
||||
raise ValueError('Writing {cols} cols but got {alias} '
|
||||
'aliases'.format(cols=len(self.columns),
|
||||
alias=len(self.header)))
|
||||
else:
|
||||
colnames = self.header
|
||||
|
||||
for colindex, colname in enumerate(colnames):
|
||||
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
|
||||
header_style)
|
||||
|
||||
def _format_header(self):
|
||||
if isinstance(self.columns, MultiIndex):
|
||||
gen = self._format_header_mi()
|
||||
else:
|
||||
gen = self._format_header_regular()
|
||||
|
||||
gen2 = ()
|
||||
if self.df.index.names:
|
||||
row = [x if x is not None else ''
|
||||
for x in self.df.index.names] + [''] * len(self.columns)
|
||||
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
|
||||
gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
|
||||
for colindex, val in enumerate(row))
|
||||
self.rowcounter += 1
|
||||
return itertools.chain(gen, gen2)
|
||||
|
||||
def _format_body(self):
|
||||
|
||||
if isinstance(self.df.index, MultiIndex):
|
||||
return self._format_hierarchical_rows()
|
||||
else:
|
||||
return self._format_regular_rows()
|
||||
|
||||
def _format_regular_rows(self):
|
||||
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
|
||||
if has_aliases or self.header:
|
||||
self.rowcounter += 1
|
||||
|
||||
# output index and index_label?
|
||||
if self.index:
|
||||
# check aliases
|
||||
# if list only take first as this is not a MultiIndex
|
||||
if (self.index_label and
|
||||
isinstance(self.index_label, (list, tuple, np.ndarray,
|
||||
Index))):
|
||||
index_label = self.index_label[0]
|
||||
# if string good to go
|
||||
elif self.index_label and isinstance(self.index_label, str):
|
||||
index_label = self.index_label
|
||||
else:
|
||||
index_label = self.df.index.names[0]
|
||||
|
||||
if isinstance(self.columns, MultiIndex):
|
||||
self.rowcounter += 1
|
||||
|
||||
if index_label and self.header is not False:
|
||||
yield ExcelCell(self.rowcounter - 1, 0, index_label,
|
||||
header_style)
|
||||
|
||||
# write index_values
|
||||
index_values = self.df.index
|
||||
if isinstance(self.df.index, PeriodIndex):
|
||||
index_values = self.df.index.to_timestamp()
|
||||
|
||||
for idx, idxval in enumerate(index_values):
|
||||
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
|
||||
|
||||
coloffset = 1
|
||||
else:
|
||||
coloffset = 0
|
||||
|
||||
for cell in self._generate_body(coloffset):
|
||||
yield cell
|
||||
|
||||
def _format_hierarchical_rows(self):
|
||||
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
|
||||
if has_aliases or self.header:
|
||||
self.rowcounter += 1
|
||||
|
||||
gcolidx = 0
|
||||
|
||||
if self.index:
|
||||
index_labels = self.df.index.names
|
||||
# check for aliases
|
||||
if (self.index_label and
|
||||
isinstance(self.index_label, (list, tuple, np.ndarray,
|
||||
Index))):
|
||||
index_labels = self.index_label
|
||||
|
||||
# MultiIndex columns require an extra row
|
||||
# with index names (blank if None) for
|
||||
# unambigous round-trip, unless not merging,
|
||||
# in which case the names all go on one row Issue #11328
|
||||
if isinstance(self.columns, MultiIndex) and self.merge_cells:
|
||||
self.rowcounter += 1
|
||||
|
||||
# if index labels are not empty go ahead and dump
|
||||
if com._any_not_none(*index_labels) and self.header is not False:
|
||||
|
||||
for cidx, name in enumerate(index_labels):
|
||||
yield ExcelCell(self.rowcounter - 1, cidx, name,
|
||||
header_style)
|
||||
|
||||
if self.merge_cells:
|
||||
# Format hierarchical rows as merged cells.
|
||||
level_strs = self.df.index.format(sparsify=True, adjoin=False,
|
||||
names=False)
|
||||
level_lengths = get_level_lengths(level_strs)
|
||||
|
||||
for spans, levels, labels in zip(level_lengths,
|
||||
self.df.index.levels,
|
||||
self.df.index.labels):
|
||||
|
||||
values = levels.take(labels,
|
||||
allow_fill=levels._can_hold_na,
|
||||
fill_value=True)
|
||||
|
||||
for i in spans:
|
||||
if spans[i] > 1:
|
||||
yield ExcelCell(self.rowcounter + i, gcolidx,
|
||||
values[i], header_style,
|
||||
self.rowcounter + i + spans[i] - 1,
|
||||
gcolidx)
|
||||
else:
|
||||
yield ExcelCell(self.rowcounter + i, gcolidx,
|
||||
values[i], header_style)
|
||||
gcolidx += 1
|
||||
|
||||
else:
|
||||
# Format hierarchical rows with non-merged values.
|
||||
for indexcolvals in zip(*self.df.index):
|
||||
for idx, indexcolval in enumerate(indexcolvals):
|
||||
yield ExcelCell(self.rowcounter + idx, gcolidx,
|
||||
indexcolval, header_style)
|
||||
gcolidx += 1
|
||||
|
||||
for cell in self._generate_body(gcolidx):
|
||||
yield cell
|
||||
|
||||
def _generate_body(self, coloffset):
|
||||
if self.styler is None:
|
||||
styles = None
|
||||
else:
|
||||
styles = self.styler._compute().ctx
|
||||
if not styles:
|
||||
styles = None
|
||||
xlstyle = None
|
||||
|
||||
# Write the body of the frame data series by series.
|
||||
for colidx in range(len(self.columns)):
|
||||
series = self.df.iloc[:, colidx]
|
||||
for i, val in enumerate(series):
|
||||
if styles is not None:
|
||||
xlstyle = self.style_converter(';'.join(styles[i, colidx]))
|
||||
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val,
|
||||
xlstyle)
|
||||
|
||||
def get_formatted_cells(self):
|
||||
for cell in itertools.chain(self._format_header(),
|
||||
self._format_body()):
|
||||
cell.val = self._format_value(cell.val)
|
||||
yield cell
|
||||
|
||||
def write(self, writer, sheet_name='Sheet1', startrow=0,
|
||||
startcol=0, freeze_panes=None, engine=None):
|
||||
"""
|
||||
writer : string or ExcelWriter object
|
||||
File path or existing ExcelWriter
|
||||
sheet_name : string, default 'Sheet1'
|
||||
Name of sheet which will contain DataFrame
|
||||
startrow :
|
||||
upper left cell row to dump data frame
|
||||
startcol :
|
||||
upper left cell column to dump data frame
|
||||
freeze_panes : tuple of integer (length 2), default None
|
||||
Specifies the one-based bottommost row and rightmost column that
|
||||
is to be frozen
|
||||
engine : string, default None
|
||||
write engine to use if writer is a path - you can also set this
|
||||
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
|
||||
and ``io.excel.xlsm.writer``.
|
||||
"""
|
||||
from pandas.io.excel import ExcelWriter
|
||||
from pandas.io.common import _stringify_path
|
||||
|
||||
if isinstance(writer, ExcelWriter):
|
||||
need_save = False
|
||||
else:
|
||||
writer = ExcelWriter(_stringify_path(writer), engine=engine)
|
||||
need_save = True
|
||||
|
||||
formatted_cells = self.get_formatted_cells()
|
||||
writer.write_cells(formatted_cells, sheet_name,
|
||||
startrow=startrow, startcol=startcol,
|
||||
freeze_panes=freeze_panes)
|
||||
if need_save:
|
||||
writer.save()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,506 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Module for formatting output data in HTML.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
from distutils.version import LooseVersion
|
||||
|
||||
from textwrap import dedent
|
||||
|
||||
import pandas.core.common as com
|
||||
from pandas.core.index import MultiIndex
|
||||
from pandas import compat
|
||||
from pandas.compat import (lzip, range, map, zip, u,
|
||||
OrderedDict, unichr)
|
||||
from pandas.core.config import get_option
|
||||
from pandas.io.formats.printing import pprint_thing
|
||||
from pandas.io.formats.format import (get_level_lengths,
|
||||
buffer_put_lines)
|
||||
from pandas.io.formats.format import TableFormatter
|
||||
|
||||
|
||||
class HTMLFormatter(TableFormatter):
|
||||
|
||||
indent_delta = 2
|
||||
|
||||
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
|
||||
notebook=False, border=None, table_id=None):
|
||||
self.fmt = formatter
|
||||
self.classes = classes
|
||||
|
||||
self.frame = self.fmt.frame
|
||||
self.columns = self.fmt.tr_frame.columns
|
||||
self.elements = []
|
||||
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
|
||||
self.escape = self.fmt.kwds.get('escape', True)
|
||||
|
||||
self.max_rows = max_rows or len(self.fmt.frame)
|
||||
self.max_cols = max_cols or len(self.fmt.columns)
|
||||
self.show_dimensions = self.fmt.show_dimensions
|
||||
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
|
||||
self.max_cols < len(self.fmt.columns))
|
||||
self.notebook = notebook
|
||||
if border is None:
|
||||
border = get_option('display.html.border')
|
||||
self.border = border
|
||||
self.table_id = table_id
|
||||
|
||||
def write(self, s, indent=0):
|
||||
rs = pprint_thing(s)
|
||||
self.elements.append(' ' * indent + rs)
|
||||
|
||||
def write_th(self, s, indent=0, tags=None):
|
||||
if self.fmt.col_space is not None and self.fmt.col_space > 0:
|
||||
tags = (tags or "")
|
||||
tags += ('style="min-width: {colspace};"'
|
||||
.format(colspace=self.fmt.col_space))
|
||||
|
||||
return self._write_cell(s, kind='th', indent=indent, tags=tags)
|
||||
|
||||
def write_td(self, s, indent=0, tags=None):
|
||||
return self._write_cell(s, kind='td', indent=indent, tags=tags)
|
||||
|
||||
def _write_cell(self, s, kind='td', indent=0, tags=None):
|
||||
if tags is not None:
|
||||
start_tag = '<{kind} {tags}>'.format(kind=kind, tags=tags)
|
||||
else:
|
||||
start_tag = '<{kind}>'.format(kind=kind)
|
||||
|
||||
if self.escape:
|
||||
# escape & first to prevent double escaping of &
|
||||
esc = OrderedDict([('&', r'&'), ('<', r'<'),
|
||||
('>', r'>')])
|
||||
else:
|
||||
esc = {}
|
||||
rs = pprint_thing(s, escape_chars=esc).strip()
|
||||
self.write(u'{start}{rs}</{kind}>'
|
||||
.format(start=start_tag, rs=rs, kind=kind), indent)
|
||||
|
||||
def write_tr(self, line, indent=0, indent_delta=4, header=False,
|
||||
align=None, tags=None, nindex_levels=0):
|
||||
if tags is None:
|
||||
tags = {}
|
||||
|
||||
if align is None:
|
||||
self.write('<tr>', indent)
|
||||
else:
|
||||
self.write('<tr style="text-align: {align};">'
|
||||
.format(align=align), indent)
|
||||
indent += indent_delta
|
||||
|
||||
for i, s in enumerate(line):
|
||||
val_tag = tags.get(i, None)
|
||||
if header or (self.bold_rows and i < nindex_levels):
|
||||
self.write_th(s, indent, tags=val_tag)
|
||||
else:
|
||||
self.write_td(s, indent, tags=val_tag)
|
||||
|
||||
indent -= indent_delta
|
||||
self.write('</tr>', indent)
|
||||
|
||||
def write_style(self):
|
||||
# We use the "scoped" attribute here so that the desired
|
||||
# style properties for the data frame are not then applied
|
||||
# throughout the entire notebook.
|
||||
template_first = """\
|
||||
<style scoped>"""
|
||||
template_last = """\
|
||||
</style>"""
|
||||
template_select = """\
|
||||
.dataframe %s {
|
||||
%s: %s;
|
||||
}"""
|
||||
element_props = [('tbody tr th:only-of-type',
|
||||
'vertical-align',
|
||||
'middle'),
|
||||
('tbody tr th',
|
||||
'vertical-align',
|
||||
'top')]
|
||||
if isinstance(self.columns, MultiIndex):
|
||||
element_props.append(('thead tr th',
|
||||
'text-align',
|
||||
'left'))
|
||||
if all((self.fmt.has_index_names,
|
||||
self.fmt.index,
|
||||
self.fmt.show_index_names)):
|
||||
element_props.append(('thead tr:last-of-type th',
|
||||
'text-align',
|
||||
'right'))
|
||||
else:
|
||||
element_props.append(('thead th',
|
||||
'text-align',
|
||||
'right'))
|
||||
template_mid = '\n\n'.join(map(lambda t: template_select % t,
|
||||
element_props))
|
||||
template = dedent('\n'.join((template_first,
|
||||
template_mid,
|
||||
template_last)))
|
||||
if self.notebook:
|
||||
self.write(template)
|
||||
|
||||
def write_result(self, buf):
|
||||
indent = 0
|
||||
id_section = ""
|
||||
frame = self.frame
|
||||
|
||||
_classes = ['dataframe'] # Default class.
|
||||
use_mathjax = get_option("display.html.use_mathjax")
|
||||
if not use_mathjax:
|
||||
_classes.append('tex2jax_ignore')
|
||||
if self.classes is not None:
|
||||
if isinstance(self.classes, str):
|
||||
self.classes = self.classes.split()
|
||||
if not isinstance(self.classes, (list, tuple)):
|
||||
raise AssertionError('classes must be list or tuple, not {typ}'
|
||||
.format(typ=type(self.classes)))
|
||||
_classes.extend(self.classes)
|
||||
|
||||
if self.notebook:
|
||||
div_style = ''
|
||||
try:
|
||||
import IPython
|
||||
if IPython.__version__ < LooseVersion('3.0.0'):
|
||||
div_style = ' style="max-width:1500px;overflow:auto;"'
|
||||
except (ImportError, AttributeError):
|
||||
pass
|
||||
|
||||
self.write('<div{style}>'.format(style=div_style))
|
||||
|
||||
self.write_style()
|
||||
|
||||
if self.table_id is not None:
|
||||
id_section = ' id="{table_id}"'.format(table_id=self.table_id)
|
||||
self.write('<table border="{border}" class="{cls}"{id_section}>'
|
||||
.format(border=self.border, cls=' '.join(_classes),
|
||||
id_section=id_section), indent)
|
||||
|
||||
indent += self.indent_delta
|
||||
indent = self._write_header(indent)
|
||||
indent = self._write_body(indent)
|
||||
|
||||
self.write('</table>', indent)
|
||||
if self.should_show_dimensions:
|
||||
by = chr(215) if compat.PY3 else unichr(215) # ×
|
||||
self.write(u('<p>{rows} rows {by} {cols} columns</p>')
|
||||
.format(rows=len(frame),
|
||||
by=by,
|
||||
cols=len(frame.columns)))
|
||||
|
||||
if self.notebook:
|
||||
self.write('</div>')
|
||||
|
||||
buffer_put_lines(buf, self.elements)
|
||||
|
||||
def _write_header(self, indent):
|
||||
truncate_h = self.fmt.truncate_h
|
||||
row_levels = self.frame.index.nlevels
|
||||
if not self.fmt.header:
|
||||
# write nothing
|
||||
return indent
|
||||
|
||||
def _column_header():
|
||||
if self.fmt.index:
|
||||
row = [''] * (self.frame.index.nlevels - 1)
|
||||
else:
|
||||
row = []
|
||||
|
||||
if isinstance(self.columns, MultiIndex):
|
||||
if self.fmt.has_column_names and self.fmt.index:
|
||||
row.append(single_column_table(self.columns.names))
|
||||
else:
|
||||
row.append('')
|
||||
style = "text-align: {just};".format(just=self.fmt.justify)
|
||||
row.extend([single_column_table(c, self.fmt.justify, style)
|
||||
for c in self.columns])
|
||||
else:
|
||||
if self.fmt.index:
|
||||
row.append(self.columns.name or '')
|
||||
row.extend(self.columns)
|
||||
return row
|
||||
|
||||
self.write('<thead>', indent)
|
||||
row = []
|
||||
|
||||
indent += self.indent_delta
|
||||
|
||||
if isinstance(self.columns, MultiIndex):
|
||||
template = 'colspan="{span:d}" halign="left"'
|
||||
|
||||
if self.fmt.sparsify:
|
||||
# GH3547
|
||||
sentinel = com.sentinel_factory()
|
||||
else:
|
||||
sentinel = None
|
||||
levels = self.columns.format(sparsify=sentinel, adjoin=False,
|
||||
names=False)
|
||||
level_lengths = get_level_lengths(levels, sentinel)
|
||||
inner_lvl = len(level_lengths) - 1
|
||||
for lnum, (records, values) in enumerate(zip(level_lengths,
|
||||
levels)):
|
||||
if truncate_h:
|
||||
# modify the header lines
|
||||
ins_col = self.fmt.tr_col_num
|
||||
if self.fmt.sparsify:
|
||||
recs_new = {}
|
||||
# Increment tags after ... col.
|
||||
for tag, span in list(records.items()):
|
||||
if tag >= ins_col:
|
||||
recs_new[tag + 1] = span
|
||||
elif tag + span > ins_col:
|
||||
recs_new[tag] = span + 1
|
||||
if lnum == inner_lvl:
|
||||
values = (values[:ins_col] + (u('...'),) +
|
||||
values[ins_col:])
|
||||
else:
|
||||
# sparse col headers do not receive a ...
|
||||
values = (values[:ins_col] +
|
||||
(values[ins_col - 1], ) +
|
||||
values[ins_col:])
|
||||
else:
|
||||
recs_new[tag] = span
|
||||
# if ins_col lies between tags, all col headers
|
||||
# get ...
|
||||
if tag + span == ins_col:
|
||||
recs_new[ins_col] = 1
|
||||
values = (values[:ins_col] + (u('...'),) +
|
||||
values[ins_col:])
|
||||
records = recs_new
|
||||
inner_lvl = len(level_lengths) - 1
|
||||
if lnum == inner_lvl:
|
||||
records[ins_col] = 1
|
||||
else:
|
||||
recs_new = {}
|
||||
for tag, span in list(records.items()):
|
||||
if tag >= ins_col:
|
||||
recs_new[tag + 1] = span
|
||||
else:
|
||||
recs_new[tag] = span
|
||||
recs_new[ins_col] = 1
|
||||
records = recs_new
|
||||
values = (values[:ins_col] + [u('...')] +
|
||||
values[ins_col:])
|
||||
|
||||
name = self.columns.names[lnum]
|
||||
row = [''] * (row_levels - 1) + ['' if name is None else
|
||||
pprint_thing(name)]
|
||||
|
||||
if row == [""] and self.fmt.index is False:
|
||||
row = []
|
||||
|
||||
tags = {}
|
||||
j = len(row)
|
||||
for i, v in enumerate(values):
|
||||
if i in records:
|
||||
if records[i] > 1:
|
||||
tags[j] = template.format(span=records[i])
|
||||
else:
|
||||
continue
|
||||
j += 1
|
||||
row.append(v)
|
||||
self.write_tr(row, indent, self.indent_delta, tags=tags,
|
||||
header=True)
|
||||
else:
|
||||
col_row = _column_header()
|
||||
align = self.fmt.justify
|
||||
|
||||
if truncate_h:
|
||||
ins_col = row_levels + self.fmt.tr_col_num
|
||||
col_row.insert(ins_col, '...')
|
||||
|
||||
self.write_tr(col_row, indent, self.indent_delta, header=True,
|
||||
align=align)
|
||||
|
||||
if all((self.fmt.has_index_names,
|
||||
self.fmt.index,
|
||||
self.fmt.show_index_names)):
|
||||
row = ([x if x is not None else ''
|
||||
for x in self.frame.index.names] +
|
||||
[''] * min(len(self.columns), self.max_cols))
|
||||
if truncate_h:
|
||||
ins_col = row_levels + self.fmt.tr_col_num
|
||||
row.insert(ins_col, '')
|
||||
self.write_tr(row, indent, self.indent_delta, header=True)
|
||||
|
||||
indent -= self.indent_delta
|
||||
self.write('</thead>', indent)
|
||||
|
||||
return indent
|
||||
|
||||
def _write_body(self, indent):
|
||||
self.write('<tbody>', indent)
|
||||
indent += self.indent_delta
|
||||
|
||||
fmt_values = {}
|
||||
for i in range(min(len(self.columns), self.max_cols)):
|
||||
fmt_values[i] = self.fmt._format_col(i)
|
||||
|
||||
# write values
|
||||
if self.fmt.index:
|
||||
if isinstance(self.frame.index, MultiIndex):
|
||||
self._write_hierarchical_rows(fmt_values, indent)
|
||||
else:
|
||||
self._write_regular_rows(fmt_values, indent)
|
||||
else:
|
||||
for i in range(min(len(self.frame), self.max_rows)):
|
||||
row = [fmt_values[j][i] for j in range(len(self.columns))]
|
||||
self.write_tr(row, indent, self.indent_delta, tags=None)
|
||||
|
||||
indent -= self.indent_delta
|
||||
self.write('</tbody>', indent)
|
||||
indent -= self.indent_delta
|
||||
|
||||
return indent
|
||||
|
||||
def _write_regular_rows(self, fmt_values, indent):
|
||||
truncate_h = self.fmt.truncate_h
|
||||
truncate_v = self.fmt.truncate_v
|
||||
|
||||
ncols = len(self.fmt.tr_frame.columns)
|
||||
nrows = len(self.fmt.tr_frame)
|
||||
fmt = self.fmt._get_formatter('__index__')
|
||||
if fmt is not None:
|
||||
index_values = self.fmt.tr_frame.index.map(fmt)
|
||||
else:
|
||||
index_values = self.fmt.tr_frame.index.format()
|
||||
|
||||
row = []
|
||||
for i in range(nrows):
|
||||
|
||||
if truncate_v and i == (self.fmt.tr_row_num):
|
||||
str_sep_row = ['...' for ele in row]
|
||||
self.write_tr(str_sep_row, indent, self.indent_delta,
|
||||
tags=None, nindex_levels=1)
|
||||
|
||||
row = []
|
||||
row.append(index_values[i])
|
||||
row.extend(fmt_values[j][i] for j in range(ncols))
|
||||
|
||||
if truncate_h:
|
||||
dot_col_ix = self.fmt.tr_col_num + 1
|
||||
row.insert(dot_col_ix, '...')
|
||||
self.write_tr(row, indent, self.indent_delta, tags=None,
|
||||
nindex_levels=1)
|
||||
|
||||
def _write_hierarchical_rows(self, fmt_values, indent):
|
||||
template = 'rowspan="{span}" valign="top"'
|
||||
|
||||
truncate_h = self.fmt.truncate_h
|
||||
truncate_v = self.fmt.truncate_v
|
||||
frame = self.fmt.tr_frame
|
||||
ncols = len(frame.columns)
|
||||
nrows = len(frame)
|
||||
row_levels = self.frame.index.nlevels
|
||||
|
||||
idx_values = frame.index.format(sparsify=False, adjoin=False,
|
||||
names=False)
|
||||
idx_values = lzip(*idx_values)
|
||||
|
||||
if self.fmt.sparsify:
|
||||
# GH3547
|
||||
sentinel = com.sentinel_factory()
|
||||
levels = frame.index.format(sparsify=sentinel, adjoin=False,
|
||||
names=False)
|
||||
|
||||
level_lengths = get_level_lengths(levels, sentinel)
|
||||
inner_lvl = len(level_lengths) - 1
|
||||
if truncate_v:
|
||||
# Insert ... row and adjust idx_values and
|
||||
# level_lengths to take this into account.
|
||||
ins_row = self.fmt.tr_row_num
|
||||
inserted = False
|
||||
for lnum, records in enumerate(level_lengths):
|
||||
rec_new = {}
|
||||
for tag, span in list(records.items()):
|
||||
if tag >= ins_row:
|
||||
rec_new[tag + 1] = span
|
||||
elif tag + span > ins_row:
|
||||
rec_new[tag] = span + 1
|
||||
|
||||
# GH 14882 - Make sure insertion done once
|
||||
if not inserted:
|
||||
dot_row = list(idx_values[ins_row - 1])
|
||||
dot_row[-1] = u('...')
|
||||
idx_values.insert(ins_row, tuple(dot_row))
|
||||
inserted = True
|
||||
else:
|
||||
dot_row = list(idx_values[ins_row])
|
||||
dot_row[inner_lvl - lnum] = u('...')
|
||||
idx_values[ins_row] = tuple(dot_row)
|
||||
else:
|
||||
rec_new[tag] = span
|
||||
# If ins_row lies between tags, all cols idx cols
|
||||
# receive ...
|
||||
if tag + span == ins_row:
|
||||
rec_new[ins_row] = 1
|
||||
if lnum == 0:
|
||||
idx_values.insert(ins_row, tuple(
|
||||
[u('...')] * len(level_lengths)))
|
||||
|
||||
# GH 14882 - Place ... in correct level
|
||||
elif inserted:
|
||||
dot_row = list(idx_values[ins_row])
|
||||
dot_row[inner_lvl - lnum] = u('...')
|
||||
idx_values[ins_row] = tuple(dot_row)
|
||||
level_lengths[lnum] = rec_new
|
||||
|
||||
level_lengths[inner_lvl][ins_row] = 1
|
||||
for ix_col in range(len(fmt_values)):
|
||||
fmt_values[ix_col].insert(ins_row, '...')
|
||||
nrows += 1
|
||||
|
||||
for i in range(nrows):
|
||||
row = []
|
||||
tags = {}
|
||||
|
||||
sparse_offset = 0
|
||||
j = 0
|
||||
for records, v in zip(level_lengths, idx_values[i]):
|
||||
if i in records:
|
||||
if records[i] > 1:
|
||||
tags[j] = template.format(span=records[i])
|
||||
else:
|
||||
sparse_offset += 1
|
||||
continue
|
||||
|
||||
j += 1
|
||||
row.append(v)
|
||||
|
||||
row.extend(fmt_values[j][i] for j in range(ncols))
|
||||
if truncate_h:
|
||||
row.insert(row_levels - sparse_offset +
|
||||
self.fmt.tr_col_num, '...')
|
||||
self.write_tr(row, indent, self.indent_delta, tags=tags,
|
||||
nindex_levels=len(levels) - sparse_offset)
|
||||
else:
|
||||
for i in range(len(frame)):
|
||||
idx_values = list(zip(*frame.index.format(
|
||||
sparsify=False, adjoin=False, names=False)))
|
||||
row = []
|
||||
row.extend(idx_values[i])
|
||||
row.extend(fmt_values[j][i] for j in range(ncols))
|
||||
if truncate_h:
|
||||
row.insert(row_levels + self.fmt.tr_col_num, '...')
|
||||
self.write_tr(row, indent, self.indent_delta, tags=None,
|
||||
nindex_levels=frame.index.nlevels)
|
||||
|
||||
|
||||
def single_column_table(column, align=None, style=None):
|
||||
table = '<table'
|
||||
if align is not None:
|
||||
table += (' align="{align}"'.format(align=align))
|
||||
if style is not None:
|
||||
table += (' style="{style}"'.format(style=style))
|
||||
table += '><tbody>'
|
||||
for i in column:
|
||||
table += ('<tr><td>{i!s}</td></tr>'.format(i=i))
|
||||
table += '</tbody></table>'
|
||||
return table
|
||||
|
||||
|
||||
def single_row_table(row): # pragma: no cover
|
||||
table = '<table><tbody><tr>'
|
||||
for i in row:
|
||||
table += ('<td>{i!s}</td>'.format(i=i))
|
||||
table += '</tr></tbody></table>'
|
||||
return table
|
||||
@@ -1,243 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Module for formatting output data in Latex.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from pandas.core.index import MultiIndex
|
||||
from pandas import compat
|
||||
from pandas.compat import range, map, zip, u
|
||||
from pandas.io.formats.format import TableFormatter
|
||||
import numpy as np
|
||||
|
||||
|
||||
class LatexFormatter(TableFormatter):
|
||||
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
|
||||
output.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
formatter : `DataFrameFormatter`
|
||||
column_format : str, default None
|
||||
The columns format as specified in `LaTeX table format
|
||||
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3 columns
|
||||
longtable : boolean, default False
|
||||
Use a longtable environment instead of tabular.
|
||||
|
||||
See Also
|
||||
--------
|
||||
HTMLFormatter
|
||||
"""
|
||||
|
||||
def __init__(self, formatter, column_format=None, longtable=False,
|
||||
multicolumn=False, multicolumn_format=None, multirow=False):
|
||||
self.fmt = formatter
|
||||
self.frame = self.fmt.frame
|
||||
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
|
||||
self.column_format = column_format
|
||||
self.longtable = longtable
|
||||
self.multicolumn = multicolumn
|
||||
self.multicolumn_format = multicolumn_format
|
||||
self.multirow = multirow
|
||||
|
||||
def write_result(self, buf):
|
||||
"""
|
||||
Render a DataFrame to a LaTeX tabular/longtable environment output.
|
||||
"""
|
||||
|
||||
# string representation of the columns
|
||||
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
|
||||
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
|
||||
.format(name=type(self.frame).__name__,
|
||||
col=self.frame.columns,
|
||||
idx=self.frame.index))
|
||||
strcols = [[info_line]]
|
||||
else:
|
||||
strcols = self.fmt._to_str_columns()
|
||||
|
||||
def get_col_type(dtype):
|
||||
if issubclass(dtype.type, np.number):
|
||||
return 'r'
|
||||
else:
|
||||
return 'l'
|
||||
|
||||
# reestablish the MultiIndex that has been joined by _to_str_column
|
||||
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
|
||||
out = self.frame.index.format(
|
||||
adjoin=False, sparsify=self.fmt.sparsify,
|
||||
names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
|
||||
)
|
||||
|
||||
# index.format will sparsify repeated entries with empty strings
|
||||
# so pad these with some empty space
|
||||
def pad_empties(x):
|
||||
for pad in reversed(x):
|
||||
if pad:
|
||||
break
|
||||
return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
|
||||
out = (pad_empties(i) for i in out)
|
||||
|
||||
# Add empty spaces for each column level
|
||||
clevels = self.frame.columns.nlevels
|
||||
out = [[' ' * len(i[-1])] * clevels + i for i in out]
|
||||
|
||||
# Add the column names to the last index column
|
||||
cnames = self.frame.columns.names
|
||||
if any(cnames):
|
||||
new_names = [i if i else '{}' for i in cnames]
|
||||
out[self.frame.index.nlevels - 1][:clevels] = new_names
|
||||
|
||||
# Get rid of old multiindex column and add new ones
|
||||
strcols = out + strcols[1:]
|
||||
|
||||
column_format = self.column_format
|
||||
if column_format is None:
|
||||
dtypes = self.frame.dtypes._values
|
||||
column_format = ''.join(map(get_col_type, dtypes))
|
||||
if self.fmt.index:
|
||||
index_format = 'l' * self.frame.index.nlevels
|
||||
column_format = index_format + column_format
|
||||
elif not isinstance(column_format,
|
||||
compat.string_types): # pragma: no cover
|
||||
raise AssertionError('column_format must be str or unicode, '
|
||||
'not {typ}'.format(typ=type(column_format)))
|
||||
|
||||
if not self.longtable:
|
||||
buf.write('\\begin{{tabular}}{{{fmt}}}\n'
|
||||
.format(fmt=column_format))
|
||||
buf.write('\\toprule\n')
|
||||
else:
|
||||
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
|
||||
.format(fmt=column_format))
|
||||
buf.write('\\toprule\n')
|
||||
|
||||
ilevels = self.frame.index.nlevels
|
||||
clevels = self.frame.columns.nlevels
|
||||
nlevels = clevels
|
||||
if self.fmt.has_index_names and self.fmt.show_index_names:
|
||||
nlevels += 1
|
||||
strrows = list(zip(*strcols))
|
||||
self.clinebuf = []
|
||||
|
||||
for i, row in enumerate(strrows):
|
||||
if i == nlevels and self.fmt.header:
|
||||
buf.write('\\midrule\n') # End of header
|
||||
if self.longtable:
|
||||
buf.write('\\endhead\n')
|
||||
buf.write('\\midrule\n')
|
||||
buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
|
||||
'page}}}} \\\\\n'.format(n=len(row)))
|
||||
buf.write('\\midrule\n')
|
||||
buf.write('\\endfoot\n\n')
|
||||
buf.write('\\bottomrule\n')
|
||||
buf.write('\\endlastfoot\n')
|
||||
if self.fmt.kwds.get('escape', True):
|
||||
# escape backslashes first
|
||||
crow = [(x.replace('\\', '\\textbackslash ')
|
||||
.replace('_', '\\_')
|
||||
.replace('%', '\\%').replace('$', '\\$')
|
||||
.replace('#', '\\#').replace('{', '\\{')
|
||||
.replace('}', '\\}').replace('~', '\\textasciitilde ')
|
||||
.replace('^', '\\textasciicircum ')
|
||||
.replace('&', '\\&')
|
||||
if (x and x != '{}') else '{}') for x in row]
|
||||
else:
|
||||
crow = [x if x else '{}' for x in row]
|
||||
if self.bold_rows and self.fmt.index:
|
||||
# bold row labels
|
||||
crow = ['\\textbf{{{x}}}'.format(x=x)
|
||||
if j < ilevels and x.strip() not in ['', '{}'] else x
|
||||
for j, x in enumerate(crow)]
|
||||
if i < clevels and self.fmt.header and self.multicolumn:
|
||||
# sum up columns to multicolumns
|
||||
crow = self._format_multicolumn(crow, ilevels)
|
||||
if (i >= nlevels and self.fmt.index and self.multirow and
|
||||
ilevels > 1):
|
||||
# sum up rows to multirows
|
||||
crow = self._format_multirow(crow, ilevels, i, strrows)
|
||||
buf.write(' & '.join(crow))
|
||||
buf.write(' \\\\\n')
|
||||
if self.multirow and i < len(strrows) - 1:
|
||||
self._print_cline(buf, i, len(strcols))
|
||||
|
||||
if not self.longtable:
|
||||
buf.write('\\bottomrule\n')
|
||||
buf.write('\\end{tabular}\n')
|
||||
else:
|
||||
buf.write('\\end{longtable}\n')
|
||||
|
||||
def _format_multicolumn(self, row, ilevels):
|
||||
r"""
|
||||
Combine columns belonging to a group to a single multicolumn entry
|
||||
according to self.multicolumn_format
|
||||
|
||||
e.g.:
|
||||
a & & & b & c &
|
||||
will become
|
||||
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
|
||||
"""
|
||||
row2 = list(row[:ilevels])
|
||||
ncol = 1
|
||||
coltext = ''
|
||||
|
||||
def append_col():
|
||||
# write multicolumn if needed
|
||||
if ncol > 1:
|
||||
row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
|
||||
.format(ncol=ncol, fmt=self.multicolumn_format,
|
||||
txt=coltext.strip()))
|
||||
# don't modify where not needed
|
||||
else:
|
||||
row2.append(coltext)
|
||||
for c in row[ilevels:]:
|
||||
# if next col has text, write the previous
|
||||
if c.strip():
|
||||
if coltext:
|
||||
append_col()
|
||||
coltext = c
|
||||
ncol = 1
|
||||
# if not, add it to the previous multicolumn
|
||||
else:
|
||||
ncol += 1
|
||||
# write last column name
|
||||
if coltext:
|
||||
append_col()
|
||||
return row2
|
||||
|
||||
def _format_multirow(self, row, ilevels, i, rows):
|
||||
r"""
|
||||
Check following rows, whether row should be a multirow
|
||||
|
||||
e.g.: becomes:
|
||||
a & 0 & \multirow{2}{*}{a} & 0 &
|
||||
& 1 & & 1 &
|
||||
b & 0 & \cline{1-2}
|
||||
b & 0 &
|
||||
"""
|
||||
for j in range(ilevels):
|
||||
if row[j].strip():
|
||||
nrow = 1
|
||||
for r in rows[i + 1:]:
|
||||
if not r[j].strip():
|
||||
nrow += 1
|
||||
else:
|
||||
break
|
||||
if nrow > 1:
|
||||
# overwrite non-multirow entry
|
||||
row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
|
||||
nrow=nrow, row=row[j].strip())
|
||||
# save when to end the current block with \cline
|
||||
self.clinebuf.append([i + nrow - 1, j + 1])
|
||||
return row
|
||||
|
||||
def _print_cline(self, buf, i, icol):
|
||||
"""
|
||||
Print clines after multirow-blocks are finished
|
||||
"""
|
||||
for cl in self.clinebuf:
|
||||
if cl[0] == i:
|
||||
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
|
||||
.format(cl=cl[1], icol=icol))
|
||||
# remove entries that have been written to buffer
|
||||
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
|
||||
@@ -1,263 +0,0 @@
|
||||
"""
|
||||
printing tools
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pandas.core.dtypes.inference import is_sequence
|
||||
from pandas import compat
|
||||
from pandas.compat import u
|
||||
from pandas.core.config import get_option
|
||||
|
||||
|
||||
def adjoin(space, *lists, **kwargs):
    """
    Glue together columns of strings, padding each column with spaces.

    The idea is to prettify.

    Parameters
    ----------
    space : int
        number of spaces used to pad between columns
    lists : list of str
        the columns of strings being joined
    strlen : callable
        function used to calculate the length of each str. Needed for
        unicode handling.
    justfunc : callable
        function used to justify each str. Needed for unicode handling.
    """
    strlen = kwargs.pop('strlen', len)
    justfunc = kwargs.pop('justfunc', justify)

    # column widths: every column except the last gets `space` extra padding
    widths = [max(map(strlen, col)) + space for col in lists[:-1]]
    widths.append(max(map(len, lists[-1])))

    nrows = max(map(len, lists))
    padded = []
    for width, col in zip(widths, lists):
        cells = justfunc(col, width, mode='left')
        # pad short columns with blank cells so every column has nrows rows
        cells.extend([' ' * width] * (nrows - len(col)))
        padded.append(cells)

    out_rows = [_join_unicode(parts) for parts in zip(*padded)]
    return _join_unicode(out_rows, sep='\n')
|
||||
|
||||
|
||||
def justify(texts, max_len, mode='right'):
|
||||
"""
|
||||
Perform ljust, center, rjust against string or list-like
|
||||
"""
|
||||
if mode == 'left':
|
||||
return [x.ljust(max_len) for x in texts]
|
||||
elif mode == 'center':
|
||||
return [x.center(max_len) for x in texts]
|
||||
else:
|
||||
return [x.rjust(max_len) for x in texts]
|
||||
|
||||
|
||||
def _join_unicode(lines, sep=''):
|
||||
try:
|
||||
return sep.join(lines)
|
||||
except UnicodeDecodeError:
|
||||
sep = compat.text_type(sep)
|
||||
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
|
||||
for x in lines])
|
||||
|
||||
|
||||
# Unicode consolidation
|
||||
# ---------------------
|
||||
#
|
||||
# pprinting utility functions for generating Unicode text or
|
||||
# bytes(3.x)/str(2.x) representations of objects.
|
||||
# Try to use these as much as possible rather than rolling your own.
|
||||
#
|
||||
# When to use
|
||||
# -----------
|
||||
#
|
||||
# 1) If you're writing code internal to pandas (no I/O directly involved),
|
||||
# use pprint_thing().
|
||||
#
|
||||
# It will always return unicode text which can be handled by other
|
||||
# parts of the package without breakage.
|
||||
#
|
||||
# 2) if you need to write something out to file, use
|
||||
# pprint_thing_encoded(encoding).
|
||||
#
|
||||
# If no encoding is specified, it defaults to utf-8. Since encoding pure
|
||||
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
|
||||
# working with straight ascii.
|
||||
|
||||
|
||||
def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
    """
    internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.

    bounds length of printed sequence, depending on options
    """
    # pick delimiters by container type: set -> {}, mutable sequence -> [],
    # everything else (e.g. tuple) -> ()
    if isinstance(seq, set):
        fmt = u("{{{body}}}")
    elif hasattr(seq, '__setitem__'):
        fmt = u("[{body}]")
    else:
        fmt = u("({body})")

    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)

    # sets are not sliceable, so pull items off an iterator instead
    it = iter(seq)
    shown = [pprint_thing(next(it), _nest_lvl + 1,
                          max_seq_items=max_seq_items, **kwds)
             for _ in range(min(nitems, len(seq)))]
    body = ", ".join(shown)

    if nitems < len(seq):
        body += ", ..."
    elif isinstance(seq, tuple) and len(seq) == 1:
        # single-element tuples need the trailing comma
        body += ','

    return fmt.format(body=body)
|
||||
|
||||
|
||||
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
    """
    internal. pprinter for dict-likes. you should probably use
    pprint_thing() rather than calling this directly.
    """
    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)

    pair_fmt = u("{key}: {val}")
    rendered = [
        pair_fmt.format(
            key=pprint_thing(k, _nest_lvl + 1,
                             max_seq_items=max_seq_items, **kwds),
            val=pprint_thing(v, _nest_lvl + 1,
                             max_seq_items=max_seq_items, **kwds))
        for k, v in list(seq.items())[:nitems]
    ]

    body = ", ".join(rendered)
    if nitems < len(seq):
        # elide the tail when the dict is longer than the display limit
        body += ", ..."
    return u("{{{things}}}").format(things=body)
|
||||
|
||||
|
||||
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
                 quote_strings=False, max_seq_items=None):
    """
    This function is the sanctioned way of converting objects
    to a unicode representation.

    properly handles nested sequences containing unicode strings
    (unicode(object) does not)

    Parameters
    ----------
    thing : anything to be formatted
    _nest_lvl : internal use only. pprint_thing() is mutually-recursive
        with pprint_sequence, this argument is used to keep track of the
        current nesting level, and limit it.
    escape_chars : list or dict, optional
        Characters to escape. If a dict is passed the values are the
        replacements
    default_escapes : bool, default False
        Whether the input escape characters replaces or adds to the defaults
    max_seq_items : False, int, default None
        Pass thru to other pretty printers to limit sequence printing

    Returns
    -------
    result - unicode object on py2, str on py3. Always Unicode.

    """

    def as_escaped_unicode(thing, escape_chars=escape_chars):
        # Unicode is fine, else we try to decode using utf-8 and 'replace'
        # if that's not it either, we have no way of knowing and the user
        # should deal with it himself.

        try:
            result = compat.text_type(thing)  # we should try this first
        except UnicodeDecodeError:
            # either utf-8 or we replace errors
            result = str(thing).decode('utf-8', "replace")

        # default control-character escapes; may be extended or replaced
        # by the escape_chars argument below
        translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
        if isinstance(escape_chars, dict):
            if default_escapes:
                # a dict *adds to* the default escape table
                translate.update(escape_chars)
            else:
                # a dict *replaces* the default escape table
                translate = escape_chars
            escape_chars = list(escape_chars.keys())
        else:
            escape_chars = escape_chars or tuple()
        for c in escape_chars:
            result = result.replace(c, translate[c])

        return compat.text_type(result)

    if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
        # iterators: don't consume them, just show their repr
        return compat.text_type(thing)
    elif (isinstance(thing, dict) and
            _nest_lvl < get_option("display.pprint_nest_depth")):
        result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
                              max_seq_items=max_seq_items)
    elif (is_sequence(thing) and
            _nest_lvl < get_option("display.pprint_nest_depth")):
        result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
                             quote_strings=quote_strings,
                             max_seq_items=max_seq_items)
    elif isinstance(thing, compat.string_types) and quote_strings:
        # quote plain strings; py2 gets the u'' prefix to match its repr
        if compat.PY3:
            fmt = u("'{thing}'")
        else:
            fmt = u("u'{thing}'")
        result = fmt.format(thing=as_escaped_unicode(thing))
    else:
        result = as_escaped_unicode(thing)

    return compat.text_type(result)  # always unicode
|
||||
|
||||
|
||||
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
    """Return pprint_thing(object) encoded to bytes with *encoding*."""
    # NOTE(review): the parameter shadows the `object` builtin; kept as-is
    # because renaming it would break keyword callers.
    unicode_repr = pprint_thing(object)  # unicode representation first
    return unicode_repr.encode(encoding, errors, **kwds)
|
||||
|
||||
|
||||
def _enable_data_resource_formatter(enable):
|
||||
if 'IPython' not in sys.modules:
|
||||
# definitely not in IPython
|
||||
return
|
||||
from IPython import get_ipython
|
||||
ip = get_ipython()
|
||||
if ip is None:
|
||||
# still not in IPython
|
||||
return
|
||||
|
||||
formatters = ip.display_formatter.formatters
|
||||
mimetype = "application/vnd.dataresource+json"
|
||||
|
||||
if enable:
|
||||
if mimetype not in formatters:
|
||||
# define tableschema formatter
|
||||
from IPython.core.formatters import BaseFormatter
|
||||
|
||||
class TableSchemaFormatter(BaseFormatter):
|
||||
print_method = '_repr_data_resource_'
|
||||
_return_type = (dict,)
|
||||
# register it:
|
||||
formatters[mimetype] = TableSchemaFormatter()
|
||||
# enable it if it's been disabled:
|
||||
formatters[mimetype].enabled = True
|
||||
else:
|
||||
# unregister tableschema mime-type
|
||||
if mimetype in formatters:
|
||||
formatters[mimetype].enabled = False
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,70 +0,0 @@
|
||||
{# NOTE: keep this template in sync with the template_structure.html document #}
|
||||
{%- block before_style -%}{%- endblock before_style -%}
|
||||
{% block style %}
|
||||
<style type="text/css" >
|
||||
{% block table_styles %}
|
||||
{% for s in table_styles %}
|
||||
#T_{{uuid}} {{s.selector}} {
|
||||
{% for p,val in s.props %}
|
||||
{{p}}: {{val}};
|
||||
{% endfor -%}
|
||||
}
|
||||
{%- endfor -%}
|
||||
{% endblock table_styles %}
|
||||
{% block before_cellstyle %}{% endblock before_cellstyle %}
|
||||
{% block cellstyle %}
|
||||
{%- for s in cellstyle %}
|
||||
#T_{{uuid}}{{s.selector}} {
|
||||
{% for p,val in s.props %}
|
||||
{{p}}: {{val}};
|
||||
{% endfor %}
|
||||
}
|
||||
{%- endfor -%}
|
||||
{%- endblock cellstyle %}
|
||||
</style>
|
||||
{%- endblock style %}
|
||||
{%- block before_table %}{% endblock before_table %}
|
||||
{%- block table %}
|
||||
<table id="T_{{uuid}}" {% if table_attributes %}{{ table_attributes }}{% endif %}>
|
||||
{%- block caption %}
|
||||
{%- if caption -%}
|
||||
<caption>{{caption}}</caption>
|
||||
{%- endif -%}
|
||||
{%- endblock caption %}
|
||||
{%- block thead %}
|
||||
<thead>
|
||||
{%- block before_head_rows %}{% endblock %}
|
||||
{%- for r in head %}
|
||||
{%- block head_tr scoped %}
|
||||
<tr>
|
||||
{%- for c in r %}
|
||||
{%- if c.is_visible != False %}
|
||||
<{{ c.type }} class="{{c.class}}" {{ c.attributes|join(" ") }}>{{c.value}}</{{ c.type }}>
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
</tr>
|
||||
{%- endblock head_tr %}
|
||||
{%- endfor %}
|
||||
{%- block after_head_rows %}{% endblock %}
|
||||
</thead>
|
||||
{%- endblock thead %}
|
||||
{%- block tbody %}
|
||||
<tbody>
|
||||
{%- block before_rows %}{%- endblock before_rows %}
|
||||
{%- for r in body %}
|
||||
{%- block tr scoped %}
|
||||
<tr>
|
||||
{%- for c in r %}
|
||||
{%- if c.is_visible != False %}
|
||||
<{{ c.type }} id="T_{{ uuid }}{{ c.id }}" class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}>
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
</tr>
|
||||
{%- endblock tr %}
|
||||
{%- endfor %}
|
||||
{%- block after_rows %}{%- endblock after_rows %}
|
||||
</tbody>
|
||||
{%- endblock tbody %}
|
||||
</table>
|
||||
{%- endblock table %}
|
||||
{%- block after_table %}{% endblock after_table %}
|
||||
@@ -1,145 +0,0 @@
|
||||
"""
|
||||
get_terminal_size() -- return width and height of terminal as a tuple
|
||||
|
||||
code from:
|
||||
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-
|
||||
python
|
||||
|
||||
written by
|
||||
Harco Kuppens (http://stackoverflow.com/users/825214/harco-kuppens)
|
||||
|
||||
It is mentioned in the stackoverflow response that this code works
|
||||
on linux, os x, windows and cygwin (windows).
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from pandas.compat import PY3
|
||||
|
||||
|
||||
__all__ = ['get_terminal_size', 'is_terminal']
|
||||
|
||||
|
||||
def get_terminal_size():
    """
    Detect terminal size and return tuple = (width, height).

    Only to be used when running in a terminal. Note that the IPython
    notebook, IPython zmq frontends, or IDLE do not run in a terminal,
    """
    import platform

    if PY3:
        # Python 3 ships this in the standard library
        return shutil.get_terminal_size()

    size = None
    system = platform.system()
    if system == 'Windows':
        size = _get_terminal_size_windows()
        if size is None:
            # needed for window's python in cygwin's xterm!
            size = _get_terminal_size_tput()
    if system in ('Linux', 'Darwin') or system.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    # fall back to a conventional default when nothing could be detected
    return size if size is not None else (80, 25)
|
||||
|
||||
|
||||
def is_terminal():
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        ip = get_ipython()
    except NameError:
        # plain interpreter: get_ipython is not a builtin -> assume terminal
        return True
    # a Jupyter kernel exposes `kernel`; terminal IPython does not
    return not hasattr(ip, 'kernel')
|
||||
|
||||
|
||||
def _get_terminal_size_windows():
|
||||
res = None
|
||||
try:
|
||||
from ctypes import windll, create_string_buffer
|
||||
|
||||
# stdin handle is -10
|
||||
# stdout handle is -11
|
||||
# stderr handle is -12
|
||||
|
||||
h = windll.kernel32.GetStdHandle(-12)
|
||||
csbi = create_string_buffer(22)
|
||||
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
|
||||
except:
|
||||
return None
|
||||
if res:
|
||||
import struct
|
||||
(bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx,
|
||||
maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
|
||||
sizex = right - left + 1
|
||||
sizey = bottom - top + 1
|
||||
return sizex, sizey
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def _get_terminal_size_tput():
|
||||
# get terminal width
|
||||
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width
|
||||
# -height-of-a-terminal-window
|
||||
try:
|
||||
import subprocess
|
||||
proc = subprocess.Popen(["tput", "cols"],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE)
|
||||
output = proc.communicate(input=None)
|
||||
cols = int(output[0])
|
||||
proc = subprocess.Popen(["tput", "lines"],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE)
|
||||
output = proc.communicate(input=None)
|
||||
rows = int(output[0])
|
||||
return (cols, rows)
|
||||
except:
|
||||
return None
|
||||
|
||||
|
||||
def _get_terminal_size_linux():
|
||||
def ioctl_GWINSZ(fd):
|
||||
try:
|
||||
import fcntl
|
||||
import termios
|
||||
import struct
|
||||
cr = struct.unpack(
|
||||
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
|
||||
except:
|
||||
return None
|
||||
return cr
|
||||
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
|
||||
if not cr:
|
||||
try:
|
||||
fd = os.open(os.ctermid(), os.O_RDONLY)
|
||||
cr = ioctl_GWINSZ(fd)
|
||||
os.close(fd)
|
||||
except:
|
||||
pass
|
||||
if not cr or cr == (0, 0):
|
||||
try:
|
||||
from os import environ as env
|
||||
cr = (env['LINES'], env['COLUMNS'])
|
||||
except:
|
||||
return None
|
||||
return int(cr[1]), int(cr[0])
|
||||
|
||||
|
||||
# Manual smoke test: print the detected terminal size when run as a script.
if __name__ == "__main__":
    sizex, sizey = get_terminal_size()
    print('width = {w} height = {h}'.format(w=sizex, h=sizey))
|
||||
Reference in New Issue
Block a user