init project
This commit is contained in:
commit
3190b416db
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
.idea
|
BIN
beep-07.wav
Normal file
BIN
beep-07.wav
Normal file
Binary file not shown.
121
dzialam_viedo.py
Normal file
121
dzialam_viedo.py
Normal file
@ -0,0 +1,121 @@
|
|||||||
|
import cv2
|
||||||
|
import datetime
|
||||||
|
import time
|
||||||
|
import numpy as np
|
||||||
|
from pygame import mixer
|
||||||
|
|
||||||
|
|
||||||
|
def alert():
    """Play the doorbell sound: five short beeps, then one final beep.

    Blocks the caller for roughly 0.5 s (five 0.1 s sleeps) while the
    beeps are triggered.  Requires 'beep-07.wav' in the working directory.

    Side effects:
        Initialises the pygame mixer on every call (re-initialising an
        already-initialised mixer is harmless).
    """
    mixer.init()
    # Renamed from `alert`: the original local shadowed this function's
    # own name, which is confusing and blocks recursion/re-entry by name.
    sound = mixer.Sound('beep-07.wav')
    for _ in range(5):
        sound.play()
        time.sleep(0.1)
    sound.play()
|
||||||
|
|
||||||
|
|
||||||
|
def click_event(event, x, y, flags, param):
    """OpenCV mouse callback: dispatch left-clicks on the bottom menu bar.

    The menu bar occupies y in [620, 703]; each button is an x range:
        cam       [20, 169]    - toggle camera feed on/off
        doorbell  [189, 448]   - play the doorbell sound
        open gate [468, 734]   - start the 'gate opened' notification
        listen    [754, 945]   - placeholder (prints only)
        pick up   [965, 1173]  - placeholder (prints only)
        off       [1193, 1267] - release capture/recorder, close windows

    Mutates module-level state: ``cam_status`` (camera on/off flag),
    ``a`` (timestamp of the last 'open gate' click) and ``globalna``
    (flag telling the main loop to draw the notification overlay).
    """
    global cam_status, a, globalna

    if event != cv2.EVENT_LBUTTONDOWN:
        return
    if not (620 <= y <= 703):
        # Click landed outside the menu bar -- ignore it.  The original
        # repeated this y-range test in every branch; hoisted here once.
        return

    if 20 <= x <= 169:
        print('cam')
        # cam_status is only ever 0 or 1; flip it.
        if cam_status == 0:
            cam_status = 1
        elif cam_status == 1:
            cam_status = 0
    elif 189 <= x <= 448:
        print('doorbell')
        alert()
    elif 468 <= x <= 734:
        print('open gate')
        a = datetime.datetime.now()
        globalna = 1  # main loop shows the overlay until notification() times it out
    elif 754 <= x <= 945:
        print('listen')
    elif 965 <= x <= 1173:
        print('pick up')
    elif 1193 <= x <= 1267:
        print('off')
        # Shut down capture, video recorder and all UI windows.
        cap.release()
        out.release()
        cv2.destroyAllWindows()
|
||||||
|
|
||||||
|
|
||||||
|
def notification():
    """Overlay the 'gate opened' banner and clear it after 3 seconds.

    Draws 'not1.png' into the shared module-level ``frame`` (in-place
    slice assignment; the image must be exactly 139x406 px with matching
    channels or NumPy raises ValueError -- TODO confirm asset size),
    then resets ``globalna`` to 0 once 3 seconds have elapsed since
    ``a`` (set by click_event on an 'open gate' click, so it is never
    None when this runs).
    """
    global globalna

    banner = cv2.imread('not1.png')
    frame[50:50 + 139, 800:800 + 406] = banner

    now = datetime.datetime.now()
    # timedelta(0, 3) in the original == 3 seconds; spelled explicitly.
    if (now - a) > datetime.timedelta(seconds=3):
        globalna = 0
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
# Module-level state shared with the callbacks above.
# (The original's top-level `global a` was a no-op and has been removed,
#  along with the unused counters `i` and `liczba`.)
# ---------------------------------------------------------------------------
a = None        # timestamp of the last 'open gate' click (set in click_event)
cam_status = 1  # 1 = show camera feed, 0 = show black screen
globalna = 0    # 1 while the 'gate opened' notification should be displayed

cap = cv2.VideoCapture(0)
four_cc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', four_cc, 10.0, (1280, 720))
cap.set(3, 1280)  # property 3 = frame width
cap.set(4, 720)   # property 4 = frame height

# Static UI assets: load ONCE.  The original re-read all seven PNGs from
# disk on every single frame, which is pure wasted I/O per iteration.
font = cv2.FONT_HERSHEY_SIMPLEX
menu = cv2.imread('menu.png')[620:720, 0:1280]
icons = [cv2.imread('icon%d.png' % n) for n in range(1, 7)]
icon_spans = [(20, 169), (189, 448), (468, 734),
              (754, 945), (965, 1173), (1193, 1267)]

# Register the mouse handler once on a named window (the original
# re-registered it after every imshow); imshow reuses this window.
cv2.namedWindow('frame')
cv2.setMouseCallback('frame', click_event)

while cap.isOpened():
    ret, frameprev = cap.read()

    # Show either the live feed or a black screen when the camera is "off".
    if cam_status == 1:
        frame = frameprev
    elif cam_status == 0:
        frame = np.zeros((720, 1280, 3), np.uint8)

    if ret:
        # Timestamp overlay, top-left corner.
        actual_time = str(datetime.datetime.now().strftime("%b %d %Y %H:%M:%S"))
        frame = cv2.putText(frame, actual_time, (10, 50), font, 1,
                            (0, 255, 255), 2, cv2.LINE_AA)

        # Paint the bottom menu bar and its six buttons.
        frame[620:720, 0:1280] = menu
        for icon, (x0, x1) in zip(icons, icon_spans):
            frame[640:640 + 63, x0:x1] = icon

        if globalna == 1:
            notification()

        cv2.imshow('frame', frame)
        # NOTE(review): the RAW camera frame is recorded even while the
        # on-screen feed is blacked out (cam_status == 0) -- presumably
        # intentional (surveillance keeps recording); confirm.
        out.write(frameprev)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
|
BIN
output.avi
Normal file
BIN
output.avi
Normal file
Binary file not shown.
27
venv/Include/site/python3.7/pygame/_camera.h
Normal file
27
venv/Include/site/python3.7/pygame/_camera.h
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _CAMERA_H
|
||||||
|
#define _CAMERA_H
|
||||||
|
|
||||||
|
#include "_pygame.h"
|
||||||
|
#include "camera.h"
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
864
venv/Include/site/python3.7/pygame/_pygame.h
Normal file
864
venv/Include/site/python3.7/pygame/_pygame.h
Normal file
@ -0,0 +1,864 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _PYGAME_H
|
||||||
|
#define _PYGAME_H
|
||||||
|
|
||||||
|
/** This header file includes all the definitions for the
|
||||||
|
** base pygame extensions. This header only requires
|
||||||
|
** SDL and Python includes. The reason for functions
|
||||||
|
** prototyped with #define's is to allow for maximum
|
||||||
|
** python portability. It also uses python as the
|
||||||
|
** runtime linker, which allows for late binding. For more
|
||||||
|
** information on this style of development, read the Python
|
||||||
|
** docs on this subject.
|
||||||
|
** http://www.python.org/doc/current/ext/using-cobjects.html
|
||||||
|
**
|
||||||
|
** If using this to build your own derived extensions,
|
||||||
|
** you'll see that the functions available here are mainly
|
||||||
|
** used to help convert between python objects and SDL objects.
|
||||||
|
** Since this library doesn't add a lot of functionality to
|
||||||
|
** the SDL libarary, it doesn't need to offer a lot either.
|
||||||
|
**
|
||||||
|
** When initializing your extension module, you must manually
|
||||||
|
** import the modules you want to use. (this is the part about
|
||||||
|
** using python as the runtime linker). Each module has its
|
||||||
|
** own import_xxx() routine. You need to perform this import
|
||||||
|
** after you have initialized your own module, and before
|
||||||
|
** you call any routines from that module. Since every module
|
||||||
|
** in pygame does this, there are plenty of examples.
|
||||||
|
**
|
||||||
|
** The base module does include some useful conversion routines
|
||||||
|
** that you are free to use in your own extension.
|
||||||
|
**
|
||||||
|
** When making changes, it is very important to keep the
|
||||||
|
** FIRSTSLOT and NUMSLOT constants up to date for each
|
||||||
|
** section. Also be sure not to overlap any of the slots.
|
||||||
|
** When you do make a mistake with this, it will result
|
||||||
|
** is a dereferenced NULL pointer that is easier to diagnose
|
||||||
|
** than it could be :]
|
||||||
|
**/
|
||||||
|
#if defined(HAVE_SNPRINTF) /* defined in python.h (pyerrors.h) and SDL.h \
|
||||||
|
(SDL_config.h) */
|
||||||
|
#undef HAVE_SNPRINTF /* remove GCC redefine warning */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// This must be before all else
|
||||||
|
#if defined(__SYMBIAN32__) && defined(OPENC)
|
||||||
|
#include <sys/types.h>
|
||||||
|
|
||||||
|
#if defined(__WINS__)
|
||||||
|
void *
|
||||||
|
_alloca(size_t size);
|
||||||
|
#define alloca _alloca
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PG_STRINGIZE_HELPER(x) #x
|
||||||
|
#define PG_STRINGIZE(x) PG_STRINGIZE_HELPER(x)
|
||||||
|
#define PG_WARN(desc) message(__FILE__ "(" PG_STRINGIZE(__LINE__) "): WARNING: " #desc)
|
||||||
|
|
||||||
|
/* This is unconditionally defined in Python.h */
|
||||||
|
#if defined(_POSIX_C_SOURCE)
|
||||||
|
#undef _POSIX_C_SOURCE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
|
||||||
|
/* the version macros are defined since version 1.9.5 */
|
||||||
|
#define PG_MAJOR_VERSION 1
|
||||||
|
#define PG_MINOR_VERSION 9
|
||||||
|
#define PG_PATCH_VERSION 6
|
||||||
|
#define PG_VERSIONNUM(MAJOR, MINOR, PATCH) (1000*(MAJOR) + 100*(MINOR) + (PATCH))
|
||||||
|
#define PG_VERSION_ATLEAST(MAJOR, MINOR, PATCH) \
|
||||||
|
(PG_VERSIONNUM(PG_MAJOR_VERSION, PG_MINOR_VERSION, PG_PATCH_VERSION) >= \
|
||||||
|
PG_VERSIONNUM(MAJOR, MINOR, PATCH))
|
||||||
|
|
||||||
|
/* Cobjects vanish in Python 3.2; so we will code as though we use capsules */
|
||||||
|
#if defined(Py_CAPSULE_H)
|
||||||
|
#define PG_HAVE_CAPSULE 1
|
||||||
|
#else
|
||||||
|
#define PG_HAVE_CAPSULE 0
|
||||||
|
#endif
|
||||||
|
#if defined(Py_COBJECT_H)
|
||||||
|
#define PG_HAVE_COBJECT 1
|
||||||
|
#else
|
||||||
|
#define PG_HAVE_COBJECT 0
|
||||||
|
#endif
|
||||||
|
#if !PG_HAVE_CAPSULE
|
||||||
|
#define PyCapsule_New(ptr, n, dfn) PyCObject_FromVoidPtr(ptr, dfn)
|
||||||
|
#define PyCapsule_GetPointer(obj, n) PyCObject_AsVoidPtr(obj)
|
||||||
|
#define PyCapsule_CheckExact(obj) PyCObject_Check(obj)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Pygame uses Py_buffer (PEP 3118) to exchange array information internally;
|
||||||
|
* define here as needed.
|
||||||
|
*/
|
||||||
|
#if !defined(PyBUF_SIMPLE)
|
||||||
|
typedef struct bufferinfo {
|
||||||
|
void *buf;
|
||||||
|
PyObject *obj;
|
||||||
|
Py_ssize_t len;
|
||||||
|
Py_ssize_t itemsize;
|
||||||
|
int readonly;
|
||||||
|
int ndim;
|
||||||
|
char *format;
|
||||||
|
Py_ssize_t *shape;
|
||||||
|
Py_ssize_t *strides;
|
||||||
|
Py_ssize_t *suboffsets;
|
||||||
|
void *internal;
|
||||||
|
} Py_buffer;
|
||||||
|
|
||||||
|
/* Flags for getting buffers */
|
||||||
|
#define PyBUF_SIMPLE 0
|
||||||
|
#define PyBUF_WRITABLE 0x0001
|
||||||
|
/* we used to include an E, backwards compatible alias */
|
||||||
|
#define PyBUF_WRITEABLE PyBUF_WRITABLE
|
||||||
|
#define PyBUF_FORMAT 0x0004
|
||||||
|
#define PyBUF_ND 0x0008
|
||||||
|
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
|
||||||
|
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
|
||||||
|
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
|
||||||
|
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
|
||||||
|
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
|
||||||
|
|
||||||
|
#define PyBUF_CONTIG (PyBUF_ND | PyBUF_WRITABLE)
|
||||||
|
#define PyBUF_CONTIG_RO (PyBUF_ND)
|
||||||
|
|
||||||
|
#define PyBUF_STRIDED (PyBUF_STRIDES | PyBUF_WRITABLE)
|
||||||
|
#define PyBUF_STRIDED_RO (PyBUF_STRIDES)
|
||||||
|
|
||||||
|
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT)
|
||||||
|
#define PyBUF_RECORDS_RO (PyBUF_STRIDES | PyBUF_FORMAT)
|
||||||
|
|
||||||
|
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_WRITABLE | PyBUF_FORMAT)
|
||||||
|
#define PyBUF_FULL_RO (PyBUF_INDIRECT | PyBUF_FORMAT)
|
||||||
|
|
||||||
|
#define PyBUF_READ 0x100
|
||||||
|
#define PyBUF_WRITE 0x200
|
||||||
|
#define PyBUF_SHADOW 0x400
|
||||||
|
|
||||||
|
typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
|
||||||
|
typedef void (*releasebufferproc)(Py_buffer *);
|
||||||
|
#endif /* #if !defined(PyBUF_SIMPLE) */
|
||||||
|
|
||||||
|
/* Flag indicating a pg_buffer; used for assertions within callbacks */
|
||||||
|
#ifndef NDEBUG
|
||||||
|
#define PyBUF_PYGAME 0x4000
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define PyBUF_HAS_FLAG(f, F) (((f) & (F)) == (F))
|
||||||
|
|
||||||
|
/* Array information exchange struct C type; inherits from Py_buffer
|
||||||
|
*
|
||||||
|
* Pygame uses its own Py_buffer derived C struct as an internal representation
|
||||||
|
* of an imported array buffer. The extended Py_buffer allows for a
|
||||||
|
* per-instance release callback,
|
||||||
|
*/
|
||||||
|
typedef void (*pybuffer_releaseproc)(Py_buffer *);
|
||||||
|
|
||||||
|
typedef struct pg_bufferinfo_s {
|
||||||
|
Py_buffer view;
|
||||||
|
PyObject *consumer; /* Input: Borrowed reference */
|
||||||
|
pybuffer_releaseproc release_buffer;
|
||||||
|
} pg_buffer;
|
||||||
|
|
||||||
|
/* Operating system specific adjustments
|
||||||
|
*/
|
||||||
|
// No signal()
|
||||||
|
#if defined(__SYMBIAN32__) && defined(HAVE_SIGNAL_H)
|
||||||
|
#undef HAVE_SIGNAL_H
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(HAVE_SNPRINTF)
|
||||||
|
#undef HAVE_SNPRINTF
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef MS_WIN32 /*Python gives us MS_WIN32, SDL needs just WIN32*/
|
||||||
|
#ifndef WIN32
|
||||||
|
#define WIN32
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/// Prefix when initializing module
|
||||||
|
#define MODPREFIX ""
|
||||||
|
/// Prefix when importing module
|
||||||
|
#define IMPPREFIX "pygame."
|
||||||
|
|
||||||
|
#ifdef __SYMBIAN32__
|
||||||
|
#undef MODPREFIX
|
||||||
|
#undef IMPPREFIX
|
||||||
|
// On Symbian there is no pygame package. The extensions are built-in or in
|
||||||
|
// sys\bin.
|
||||||
|
#define MODPREFIX "pygame_"
|
||||||
|
#define IMPPREFIX "pygame_"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <SDL.h>
|
||||||
|
|
||||||
|
/* Pygame's SDL version macros:
|
||||||
|
* IS_SDLv1 is 1 if SDL 1.x.x, 0 otherwise
|
||||||
|
* IS_SDLv2 is 1 if at least SDL 2.0.0, 0 otherwise
|
||||||
|
*/
|
||||||
|
#if (SDL_VERSION_ATLEAST(2, 0, 0))
|
||||||
|
#define IS_SDLv1 0
|
||||||
|
#define IS_SDLv2 1
|
||||||
|
#else
|
||||||
|
#define IS_SDLv1 1
|
||||||
|
#define IS_SDLv2 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*#if IS_SDLv1 && PG_MAJOR_VERSION >= 2
|
||||||
|
#error pygame 2 requires SDL 2
|
||||||
|
#endif*/
|
||||||
|
|
||||||
|
#if IS_SDLv2
|
||||||
|
/* SDL 1.2 constants removed from SDL 2 */
|
||||||
|
typedef enum {
|
||||||
|
SDL_HWSURFACE = 0,
|
||||||
|
SDL_RESIZABLE = SDL_WINDOW_RESIZABLE,
|
||||||
|
SDL_ASYNCBLIT = 0,
|
||||||
|
SDL_OPENGL = SDL_WINDOW_OPENGL,
|
||||||
|
SDL_OPENGLBLIT = 0,
|
||||||
|
SDL_ANYFORMAT = 0,
|
||||||
|
SDL_HWPALETTE = 0,
|
||||||
|
SDL_DOUBLEBUF = 0,
|
||||||
|
SDL_FULLSCREEN = SDL_WINDOW_FULLSCREEN,
|
||||||
|
SDL_HWACCEL = 0,
|
||||||
|
SDL_SRCCOLORKEY = 0,
|
||||||
|
SDL_RLEACCELOK = 0,
|
||||||
|
SDL_SRCALPHA = 0,
|
||||||
|
SDL_NOFRAME = SDL_WINDOW_BORDERLESS,
|
||||||
|
SDL_GL_SWAP_CONTROL = 0,
|
||||||
|
TIMER_RESOLUTION = 0
|
||||||
|
} PygameVideoFlags;
|
||||||
|
|
||||||
|
/* the wheel button constants were removed from SDL 2 */
|
||||||
|
typedef enum {
|
||||||
|
PGM_BUTTON_LEFT = SDL_BUTTON_LEFT,
|
||||||
|
PGM_BUTTON_RIGHT = SDL_BUTTON_RIGHT,
|
||||||
|
PGM_BUTTON_MIDDLE = SDL_BUTTON_MIDDLE,
|
||||||
|
PGM_BUTTON_WHEELUP = 4,
|
||||||
|
PGM_BUTTON_WHEELDOWN = 5,
|
||||||
|
PGM_BUTTON_X1 = SDL_BUTTON_X1 + 2,
|
||||||
|
PGM_BUTTON_X2 = SDL_BUTTON_X2 + 2,
|
||||||
|
PGM_BUTTON_KEEP = 0x80
|
||||||
|
} PygameMouseFlags;
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
SDL_NOEVENT = 0,
|
||||||
|
/* SDL 1.2 allowed for 8 user defined events. */
|
||||||
|
SDL_NUMEVENTS = SDL_USEREVENT + 8,
|
||||||
|
SDL_ACTIVEEVENT = SDL_NUMEVENTS,
|
||||||
|
PGE_EVENTBEGIN = SDL_NUMEVENTS,
|
||||||
|
SDL_VIDEORESIZE,
|
||||||
|
SDL_VIDEOEXPOSE,
|
||||||
|
PGE_KEYREPEAT,
|
||||||
|
PGE_EVENTEND
|
||||||
|
} PygameEventCode;
|
||||||
|
|
||||||
|
#define PGE_NUMEVENTS (PGE_EVENTEND - PGE_EVENTBEGIN)
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
SDL_APPFOCUSMOUSE,
|
||||||
|
SDL_APPINPUTFOCUS,
|
||||||
|
SDL_APPACTIVE
|
||||||
|
} PygameAppCode;
|
||||||
|
|
||||||
|
/* Surface flags: based on SDL 1.2 flags */
|
||||||
|
typedef enum {
|
||||||
|
PGS_SWSURFACE = 0x00000000,
|
||||||
|
PGS_HWSURFACE = 0x00000001,
|
||||||
|
PGS_ASYNCBLIT = 0x00000004,
|
||||||
|
|
||||||
|
PGS_ANYFORMAT = 0x10000000,
|
||||||
|
PGS_HWPALETTE = 0x20000000,
|
||||||
|
PGS_DOUBLEBUF = 0x40000000,
|
||||||
|
PGS_FULLSCREEN = 0x80000000,
|
||||||
|
PGS_OPENGL = 0x00000002,
|
||||||
|
PGS_OPENGLBLIT = 0x0000000A,
|
||||||
|
PGS_RESIZABLE = 0x00000010,
|
||||||
|
PGS_NOFRAME = 0x00000020,
|
||||||
|
PGS_SHOWN = 0x00000040, /* Added from SDL 2 */
|
||||||
|
PGS_HIDDEN = 0x00000080, /* Added from SDL 2 */
|
||||||
|
|
||||||
|
PGS_HWACCEL = 0x00000100,
|
||||||
|
PGS_SRCCOLORKEY = 0x00001000,
|
||||||
|
PGS_RLEACCELOK = 0x00002000,
|
||||||
|
PGS_RLEACCEL = 0x00004000,
|
||||||
|
PGS_SRCALPHA = 0x00010000,
|
||||||
|
PGS_PREALLOC = 0x01000000
|
||||||
|
} PygameSurfaceFlags;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
Uint32 hw_available:1;
|
||||||
|
Uint32 wm_available:1;
|
||||||
|
Uint32 blit_hw:1;
|
||||||
|
Uint32 blit_hw_CC:1;
|
||||||
|
Uint32 blit_hw_A:1;
|
||||||
|
Uint32 blit_sw:1;
|
||||||
|
Uint32 blit_sw_CC:1;
|
||||||
|
Uint32 blit_sw_A:1;
|
||||||
|
Uint32 blit_fill:1;
|
||||||
|
Uint32 video_mem;
|
||||||
|
SDL_PixelFormat *vfmt;
|
||||||
|
SDL_PixelFormat vfmt_data;
|
||||||
|
int current_w;
|
||||||
|
int current_h;
|
||||||
|
} pg_VideoInfo;
|
||||||
|
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
/* macros used throughout the source */
|
||||||
|
#define RAISE(x, y) (PyErr_SetString((x), (y)), (PyObject *)NULL)
|
||||||
|
|
||||||
|
#ifdef WITH_THREAD
|
||||||
|
#define PG_CHECK_THREADS() (1)
|
||||||
|
#else /* ~WITH_THREAD */
|
||||||
|
#define PG_CHECK_THREADS() \
|
||||||
|
(RAISE(PyExc_NotImplementedError, \
|
||||||
|
"Python built without thread support"))
|
||||||
|
#endif /* ~WITH_THREAD */
|
||||||
|
|
||||||
|
#define PyType_Init(x) (((x).ob_type) = &PyType_Type)
|
||||||
|
#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API"
|
||||||
|
|
||||||
|
#ifndef MIN
|
||||||
|
#define MIN(a, b) ((a) < (b) ? (a) : (b))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef MAX
|
||||||
|
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef ABS
|
||||||
|
#define ABS(a) (((a) < 0) ? -(a) : (a))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* test sdl initializations */
|
||||||
|
#define VIDEO_INIT_CHECK() \
|
||||||
|
if (!SDL_WasInit(SDL_INIT_VIDEO)) \
|
||||||
|
return RAISE(pgExc_SDLError, "video system not initialized")
|
||||||
|
|
||||||
|
#define CDROM_INIT_CHECK() \
|
||||||
|
if (!SDL_WasInit(SDL_INIT_CDROM)) \
|
||||||
|
return RAISE(pgExc_SDLError, "cdrom system not initialized")
|
||||||
|
|
||||||
|
#define JOYSTICK_INIT_CHECK() \
|
||||||
|
if (!SDL_WasInit(SDL_INIT_JOYSTICK)) \
|
||||||
|
return RAISE(pgExc_SDLError, "joystick system not initialized")
|
||||||
|
|
||||||
|
/* BASE */
|
||||||
|
#define VIEW_CONTIGUOUS 1
|
||||||
|
#define VIEW_C_ORDER 2
|
||||||
|
#define VIEW_F_ORDER 4
|
||||||
|
|
||||||
|
#define PYGAMEAPI_BASE_FIRSTSLOT 0
|
||||||
|
#if IS_SDLv1
|
||||||
|
#define PYGAMEAPI_BASE_NUMSLOTS 19
|
||||||
|
#else /* IS_SDLv2 */
|
||||||
|
#define PYGAMEAPI_BASE_NUMSLOTS 23
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
#ifndef PYGAMEAPI_BASE_INTERNAL
|
||||||
|
#define pgExc_SDLError ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT])
|
||||||
|
|
||||||
|
#define pg_RegisterQuit \
|
||||||
|
(*(void (*)(void (*)(void)))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 1])
|
||||||
|
|
||||||
|
#define pg_IntFromObj \
|
||||||
|
(*(int (*)(PyObject *, int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 2])
|
||||||
|
|
||||||
|
#define pg_IntFromObjIndex \
|
||||||
|
(*(int (*)(PyObject *, int, \
|
||||||
|
int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 3])
|
||||||
|
|
||||||
|
#define pg_TwoIntsFromObj \
|
||||||
|
(*(int (*)(PyObject *, int *, \
|
||||||
|
int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 4])
|
||||||
|
|
||||||
|
#define pg_FloatFromObj \
|
||||||
|
(*(int (*)(PyObject *, float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 5])
|
||||||
|
|
||||||
|
#define pg_FloatFromObjIndex \
|
||||||
|
(*(int (*)(PyObject *, int, \
|
||||||
|
float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 6])
|
||||||
|
|
||||||
|
#define pg_TwoFloatsFromObj \
|
||||||
|
(*(int (*)(PyObject *, float *, \
|
||||||
|
float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 7])
|
||||||
|
|
||||||
|
#define pg_UintFromObj \
|
||||||
|
(*(int (*)(PyObject *, \
|
||||||
|
Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 8])
|
||||||
|
|
||||||
|
#define pg_UintFromObjIndex \
|
||||||
|
(*(int (*)(PyObject *, int, \
|
||||||
|
Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 9])
|
||||||
|
|
||||||
|
#define pgVideo_AutoQuit \
|
||||||
|
(*(void (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 10])
|
||||||
|
|
||||||
|
#define pgVideo_AutoInit \
|
||||||
|
(*(int (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 11])
|
||||||
|
|
||||||
|
#define pg_RGBAFromObj \
|
||||||
|
(*(int (*)(PyObject *, \
|
||||||
|
Uint8 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 12])
|
||||||
|
|
||||||
|
#define pgBuffer_AsArrayInterface \
|
||||||
|
(*(PyObject * (*)(Py_buffer *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 13])
|
||||||
|
|
||||||
|
#define pgBuffer_AsArrayStruct \
|
||||||
|
(*(PyObject * (*)(Py_buffer *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 14])
|
||||||
|
|
||||||
|
#define pgObject_GetBuffer \
|
||||||
|
(*(int (*)(PyObject *, pg_buffer *, \
|
||||||
|
int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 15])
|
||||||
|
|
||||||
|
#define pgBuffer_Release \
|
||||||
|
(*(void (*)(pg_buffer *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 16])
|
||||||
|
|
||||||
|
#define pgDict_AsBuffer \
|
||||||
|
(*(int (*)(pg_buffer *, PyObject *, \
|
||||||
|
int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 17])
|
||||||
|
|
||||||
|
#define pgExc_BufferError \
|
||||||
|
((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 18])
|
||||||
|
|
||||||
|
#if IS_SDLv2
|
||||||
|
#define pg_GetDefaultWindow \
|
||||||
|
(*(SDL_Window * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 19])
|
||||||
|
|
||||||
|
#define pg_SetDefaultWindow \
|
||||||
|
(*(void (*)(SDL_Window *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 20])
|
||||||
|
|
||||||
|
#define pg_GetDefaultWindowSurface \
|
||||||
|
(*(PyObject * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 21])
|
||||||
|
|
||||||
|
#define pg_SetDefaultWindowSurface \
|
||||||
|
(*(void (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 22])
|
||||||
|
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
|
||||||
|
#define import_pygame_base() IMPORT_PYGAME_MODULE(base, BASE)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* RECT */
|
||||||
|
#define PYGAMEAPI_RECT_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_BASE_FIRSTSLOT + PYGAMEAPI_BASE_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_RECT_NUMSLOTS 4
|
||||||
|
|
||||||
|
#if IS_SDLv1
|
||||||
|
typedef struct {
|
||||||
|
int x, y;
|
||||||
|
int w, h;
|
||||||
|
} GAME_Rect;
|
||||||
|
#else
|
||||||
|
typedef SDL_Rect GAME_Rect;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD GAME_Rect r;
|
||||||
|
PyObject *weakreflist;
|
||||||
|
} pgRectObject;
|
||||||
|
|
||||||
|
#define pgRect_AsRect(x) (((pgRectObject *)x)->r)
|
||||||
|
#ifndef PYGAMEAPI_RECT_INTERNAL
|
||||||
|
#define pgRect_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0])
|
||||||
|
#define pgRect_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0])
|
||||||
|
#define pgRect_New \
|
||||||
|
(*(PyObject * (*)(SDL_Rect *)) PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 1])
|
||||||
|
#define pgRect_New4 \
|
||||||
|
(*(PyObject * (*)(int, int, int, int)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 2])
|
||||||
|
#define pgRect_FromObject \
|
||||||
|
(*(GAME_Rect * (*)(PyObject *, GAME_Rect *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 3])
|
||||||
|
|
||||||
|
#define import_pygame_rect() IMPORT_PYGAME_MODULE(rect, RECT)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* CDROM */
|
||||||
|
#define PYGAMEAPI_CDROM_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_RECT_FIRSTSLOT + PYGAMEAPI_RECT_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_CDROM_NUMSLOTS 2
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD int id;
|
||||||
|
} pgCDObject;
|
||||||
|
|
||||||
|
#define pgCD_AsID(x) (((pgCDObject *)x)->id)
|
||||||
|
#ifndef PYGAMEAPI_CDROM_INTERNAL
|
||||||
|
#define pgCD_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0])
|
||||||
|
#define pgCD_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0])
|
||||||
|
#define pgCD_New \
|
||||||
|
(*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 1])
|
||||||
|
|
||||||
|
#define import_pygame_cd() IMPORT_PYGAME_MODULE(cdrom, CDROM)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* JOYSTICK */
|
||||||
|
#define PYGAMEAPI_JOYSTICK_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_CDROM_FIRSTSLOT + PYGAMEAPI_CDROM_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_JOYSTICK_NUMSLOTS 2
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD int id;
|
||||||
|
} pgJoystickObject;
|
||||||
|
|
||||||
|
#define pgJoystick_AsID(x) (((pgJoystickObject *)x)->id)
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_JOYSTICK_INTERNAL
|
||||||
|
#define pgJoystick_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0])
|
||||||
|
|
||||||
|
#define pgJoystick_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0])
|
||||||
|
#define pgJoystick_New \
|
||||||
|
(*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 1])
|
||||||
|
|
||||||
|
#define import_pygame_joystick() IMPORT_PYGAME_MODULE(joystick, JOYSTICK)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* DISPLAY */
|
||||||
|
#define PYGAMEAPI_DISPLAY_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_JOYSTICK_FIRSTSLOT + PYGAMEAPI_JOYSTICK_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_DISPLAY_NUMSLOTS 2
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
#if IS_SDLv1
|
||||||
|
PyObject_HEAD SDL_VideoInfo info;
|
||||||
|
#else
|
||||||
|
PyObject_HEAD pg_VideoInfo info;
|
||||||
|
#endif
|
||||||
|
} pgVidInfoObject;
|
||||||
|
|
||||||
|
#define pgVidInfo_AsVidInfo(x) (((pgVidInfoObject *)x)->info)
|
||||||
|
#ifndef PYGAMEAPI_DISPLAY_INTERNAL
|
||||||
|
#define pgVidInfo_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0])
|
||||||
|
|
||||||
|
#define pgVidInfo_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0])
|
||||||
|
|
||||||
|
#if IS_SDLv1
|
||||||
|
#define pgVidInfo_New \
|
||||||
|
(*(PyObject * (*)(SDL_VideoInfo *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1])
|
||||||
|
#else
|
||||||
|
#define pgVidInfo_New \
|
||||||
|
(*(PyObject * (*)(pg_VideoInfo *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1])
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define import_pygame_display() IMPORT_PYGAME_MODULE(display, DISPLAY)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* SURFACE */
|
||||||
|
#define PYGAMEAPI_SURFACE_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_DISPLAY_FIRSTSLOT + PYGAMEAPI_DISPLAY_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_SURFACE_NUMSLOTS 3
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD SDL_Surface *surf;
|
||||||
|
#if IS_SDLv2
|
||||||
|
int owner;
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
struct pgSubSurface_Data *subsurface; /*ptr to subsurface data (if a
|
||||||
|
* subsurface)*/
|
||||||
|
PyObject *weakreflist;
|
||||||
|
PyObject *locklist;
|
||||||
|
PyObject *dependency;
|
||||||
|
} pgSurfaceObject;
|
||||||
|
#define pgSurface_AsSurface(x) (((pgSurfaceObject *)x)->surf)
|
||||||
|
#ifndef PYGAMEAPI_SURFACE_INTERNAL
|
||||||
|
#define pgSurface_Check(x) \
|
||||||
|
(PyObject_IsInstance((x), \
|
||||||
|
(PyObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0]))
|
||||||
|
#define pgSurface_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0])
|
||||||
|
#if IS_SDLv1
|
||||||
|
#define pgSurface_New \
|
||||||
|
(*(PyObject * (*)(SDL_Surface *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1])
|
||||||
|
#else /* IS_SDLv2 */
|
||||||
|
#define pgSurface_New2 \
|
||||||
|
(*(PyObject * (*)(SDL_Surface *, int)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1])
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
#define pgSurface_Blit \
|
||||||
|
(*(int (*)(PyObject *, PyObject *, SDL_Rect *, SDL_Rect *, \
|
||||||
|
int))PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 2])
|
||||||
|
|
||||||
|
#define import_pygame_surface() \
|
||||||
|
do { \
|
||||||
|
IMPORT_PYGAME_MODULE(surface, SURFACE); \
|
||||||
|
if (PyErr_Occurred() != NULL) \
|
||||||
|
break; \
|
||||||
|
IMPORT_PYGAME_MODULE(surflock, SURFLOCK); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#if IS_SDLv2
|
||||||
|
#define pgSurface_New(surface) pgSurface_New2((surface), 1)
|
||||||
|
#define pgSurface_NewNoOwn(surface) pgSurface_New2((surface), 0)
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* SURFLOCK */ /*auto import/init by surface*/
|
||||||
|
#define PYGAMEAPI_SURFLOCK_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_SURFACE_FIRSTSLOT + PYGAMEAPI_SURFACE_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_SURFLOCK_NUMSLOTS 8
|
||||||
|
struct pgSubSurface_Data {
|
||||||
|
PyObject *owner;
|
||||||
|
int pixeloffset;
|
||||||
|
int offsetx, offsety;
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD PyObject *surface;
|
||||||
|
PyObject *lockobj;
|
||||||
|
PyObject *weakrefs;
|
||||||
|
} pgLifetimeLockObject;
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_SURFLOCK_INTERNAL
|
||||||
|
#define pgLifetimeLock_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 0])
|
||||||
|
#define pgSurface_Prep(x) \
|
||||||
|
if (((pgSurfaceObject *)x)->subsurface) \
|
||||||
|
(*(*(void (*)( \
|
||||||
|
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 1]))(x)
|
||||||
|
|
||||||
|
#define pgSurface_Unprep(x) \
|
||||||
|
if (((pgSurfaceObject *)x)->subsurface) \
|
||||||
|
(*(*(void (*)( \
|
||||||
|
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 2]))(x)
|
||||||
|
|
||||||
|
#define pgSurface_Lock \
|
||||||
|
(*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 3])
|
||||||
|
#define pgSurface_Unlock \
|
||||||
|
(*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 4])
|
||||||
|
#define pgSurface_LockBy \
|
||||||
|
(*(int (*)(PyObject *, \
|
||||||
|
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 5])
|
||||||
|
#define pgSurface_UnlockBy \
|
||||||
|
(*(int (*)(PyObject *, \
|
||||||
|
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 6])
|
||||||
|
#define pgSurface_LockLifetime \
|
||||||
|
(*(PyObject * (*)(PyObject *, PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 7])
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* EVENT */
|
||||||
|
#define PYGAMEAPI_EVENT_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_SURFLOCK_FIRSTSLOT + PYGAMEAPI_SURFLOCK_NUMSLOTS)
|
||||||
|
#if IS_SDLv1
|
||||||
|
#define PYGAMEAPI_EVENT_NUMSLOTS 4
|
||||||
|
#else /* IS_SDLv2 */
|
||||||
|
#define PYGAMEAPI_EVENT_NUMSLOTS 6
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD int type;
|
||||||
|
PyObject *dict;
|
||||||
|
} pgEventObject;
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_EVENT_INTERNAL
|
||||||
|
#define pgEvent_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0])
|
||||||
|
#define pgEvent_Type \
|
||||||
|
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0])
|
||||||
|
#define pgEvent_New \
|
||||||
|
(*(PyObject * (*)(SDL_Event *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 1])
|
||||||
|
#define pgEvent_New2 \
|
||||||
|
(*(PyObject * (*)(int, PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 2])
|
||||||
|
#define pgEvent_FillUserEvent \
|
||||||
|
(*(int (*)(pgEventObject *, \
|
||||||
|
SDL_Event *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 3])
|
||||||
|
#if IS_SDLv2
|
||||||
|
#define pg_EnableKeyRepeat \
|
||||||
|
(*(int (*)(int, int))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 4])
|
||||||
|
#define pg_GetKeyRepeat \
|
||||||
|
(*(void (*)(int *, int *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 5])
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
#define import_pygame_event() IMPORT_PYGAME_MODULE(event, EVENT)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* RWOBJECT */
|
||||||
|
/*the rwobject are only needed for C side work, not accessable from python*/
|
||||||
|
#define PYGAMEAPI_RWOBJECT_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_EVENT_FIRSTSLOT + PYGAMEAPI_EVENT_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_RWOBJECT_NUMSLOTS 6
|
||||||
|
#ifndef PYGAMEAPI_RWOBJECT_INTERNAL
|
||||||
|
#define pgRWops_FromObject \
|
||||||
|
(*(SDL_RWops * (*)(PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 0])
|
||||||
|
#define pgRWops_IsFileObject \
|
||||||
|
(*(int (*)(SDL_RWops *))PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 1])
|
||||||
|
#define pg_EncodeFilePath \
|
||||||
|
(*(PyObject * (*)(PyObject *, PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 2])
|
||||||
|
#define pg_EncodeString \
|
||||||
|
(*(PyObject * (*)(PyObject *, const char *, const char *, PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 3])
|
||||||
|
#define pgRWops_FromFileObject \
|
||||||
|
(*(SDL_RWops * (*)(PyObject *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 4])
|
||||||
|
#define pgRWops_ReleaseObject \
|
||||||
|
(*(int (*)(SDL_RWops *)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 5])
|
||||||
|
#define import_pygame_rwobject() IMPORT_PYGAME_MODULE(rwobject, RWOBJECT)
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* PixelArray */
|
||||||
|
#define PYGAMEAPI_PIXELARRAY_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_RWOBJECT_FIRSTSLOT + PYGAMEAPI_RWOBJECT_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_PIXELARRAY_NUMSLOTS 2
|
||||||
|
#ifndef PYGAMEAPI_PIXELARRAY_INTERNAL
|
||||||
|
#define PyPixelArray_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 0])
|
||||||
|
#define PyPixelArray_New \
|
||||||
|
(*(PyObject * (*)) PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 1])
|
||||||
|
#define import_pygame_pixelarray() IMPORT_PYGAME_MODULE(pixelarray, PIXELARRAY)
|
||||||
|
#endif /* PYGAMEAPI_PIXELARRAY_INTERNAL */
|
||||||
|
|
||||||
|
/* Color */
|
||||||
|
#define PYGAMEAPI_COLOR_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_PIXELARRAY_FIRSTSLOT + PYGAMEAPI_PIXELARRAY_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_COLOR_NUMSLOTS 4
|
||||||
|
#ifndef PYGAMEAPI_COLOR_INTERNAL
|
||||||
|
#define pgColor_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 0])
|
||||||
|
#define pgColor_Type (*(PyObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT])
|
||||||
|
#define pgColor_New \
|
||||||
|
(*(PyObject * (*)(Uint8 *)) PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 1])
|
||||||
|
#define pgColor_NewLength \
|
||||||
|
(*(PyObject * (*)(Uint8 *, Uint8)) \
|
||||||
|
PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 3])
|
||||||
|
|
||||||
|
#define pg_RGBAFromColorObj \
|
||||||
|
(*(int (*)(PyObject *, \
|
||||||
|
Uint8 *))PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 2])
|
||||||
|
#define import_pygame_color() IMPORT_PYGAME_MODULE(color, COLOR)
|
||||||
|
#endif /* PYGAMEAPI_COLOR_INTERNAL */
|
||||||
|
|
||||||
|
/* Math */
|
||||||
|
#define PYGAMEAPI_MATH_FIRSTSLOT \
|
||||||
|
(PYGAMEAPI_COLOR_FIRSTSLOT + PYGAMEAPI_COLOR_NUMSLOTS)
|
||||||
|
#define PYGAMEAPI_MATH_NUMSLOTS 2
|
||||||
|
#ifndef PYGAMEAPI_MATH_INTERNAL
|
||||||
|
#define pgVector2_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 0])
|
||||||
|
#define pgVector3_Check(x) \
|
||||||
|
((x)->ob_type == \
|
||||||
|
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1])
|
||||||
|
/*
|
||||||
|
#define pgVector2_New \
|
||||||
|
(*(PyObject*(*)) PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1])
|
||||||
|
*/
|
||||||
|
#define import_pygame_math() IMPORT_PYGAME_MODULE(math, MATH)
|
||||||
|
#endif /* PYGAMEAPI_MATH_INTERNAL */
|
||||||
|
|
||||||
|
#define PG_CAPSULE_NAME(m) (IMPPREFIX m "." PYGAMEAPI_LOCAL_ENTRY)
|
||||||
|
|
||||||
|
#define _IMPORT_PYGAME_MODULE(module, MODULE, api_root) \
|
||||||
|
{ \
|
||||||
|
PyObject *_module = PyImport_ImportModule(IMPPREFIX #module); \
|
||||||
|
\
|
||||||
|
if (_module != NULL) { \
|
||||||
|
PyObject *_c_api = \
|
||||||
|
PyObject_GetAttrString(_module, PYGAMEAPI_LOCAL_ENTRY); \
|
||||||
|
\
|
||||||
|
Py_DECREF(_module); \
|
||||||
|
if (_c_api != NULL && PyCapsule_CheckExact(_c_api)) { \
|
||||||
|
void **localptr = (void **)PyCapsule_GetPointer( \
|
||||||
|
_c_api, PG_CAPSULE_NAME(#module)); \
|
||||||
|
\
|
||||||
|
if (localptr != NULL) { \
|
||||||
|
memcpy(api_root + PYGAMEAPI_##MODULE##_FIRSTSLOT, \
|
||||||
|
localptr, \
|
||||||
|
sizeof(void **) * PYGAMEAPI_##MODULE##_NUMSLOTS); \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
Py_XDECREF(_c_api); \
|
||||||
|
} \
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef NO_PYGAME_C_API
|
||||||
|
#define IMPORT_PYGAME_MODULE(module, MODULE) \
|
||||||
|
_IMPORT_PYGAME_MODULE(module, MODULE, PyGAME_C_API)
|
||||||
|
#define PYGAMEAPI_TOTALSLOTS \
|
||||||
|
(PYGAMEAPI_MATH_FIRSTSLOT + PYGAMEAPI_MATH_NUMSLOTS)
|
||||||
|
|
||||||
|
#ifdef PYGAME_H
|
||||||
|
void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS] = {NULL};
|
||||||
|
#else
|
||||||
|
extern void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS];
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if PG_HAVE_CAPSULE
|
||||||
|
#define encapsulate_api(ptr, module) \
|
||||||
|
PyCapsule_New(ptr, PG_CAPSULE_NAME(module), NULL)
|
||||||
|
#else
|
||||||
|
#define encapsulate_api(ptr, module) PyCObject_FromVoidPtr(ptr, NULL)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef PG_INLINE
|
||||||
|
#if defined(__clang__)
|
||||||
|
#define PG_INLINE __inline__ __attribute__((__unused__))
|
||||||
|
#elif defined(__GNUC__)
|
||||||
|
#define PG_INLINE __inline__
|
||||||
|
#elif defined(_MSC_VER)
|
||||||
|
#define PG_INLINE __inline
|
||||||
|
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
|
||||||
|
#define PG_INLINE inline
|
||||||
|
#else
|
||||||
|
#define PG_INLINE
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*last platform compiler stuff*/
|
||||||
|
#if defined(macintosh) && defined(__MWERKS__) || defined(__SYMBIAN32__)
|
||||||
|
#define PYGAME_EXPORT __declspec(export)
|
||||||
|
#else
|
||||||
|
#define PYGAME_EXPORT
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
#endif /* PYGAME_H */
|
31
venv/Include/site/python3.7/pygame/_surface.h
Normal file
31
venv/Include/site/python3.7/pygame/_surface.h
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
Copyright (C) 2007 Marcus von Appen
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _SURFACE_H
|
||||||
|
#define _SURFACE_H
|
||||||
|
|
||||||
|
#include "_pygame.h"
|
||||||
|
#include "surface.h"
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
146
venv/Include/site/python3.7/pygame/bitmask.h
Normal file
146
venv/Include/site/python3.7/pygame/bitmask.h
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
/*
|
||||||
|
Bitmask 1.7 - A pixel-perfect collision detection library.
|
||||||
|
|
||||||
|
Copyright (C) 2002-2005 Ulf Ekstrom except for the bitcount
|
||||||
|
function which is copyright (C) Donald W. Gillies, 1992.
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
*/
|
||||||
|
#ifndef BITMASK_H
|
||||||
|
#define BITMASK_H
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <limits.h>
|
||||||
|
/* Define INLINE for different compilers. If your compiler does not
|
||||||
|
support inlining then there might be a performance hit in
|
||||||
|
bitmask_overlap_area().
|
||||||
|
*/
|
||||||
|
#ifndef INLINE
|
||||||
|
# ifdef __GNUC__
|
||||||
|
# define INLINE inline
|
||||||
|
# else
|
||||||
|
# ifdef _MSC_VER
|
||||||
|
# define INLINE __inline
|
||||||
|
# else
|
||||||
|
# define INLINE
|
||||||
|
# endif
|
||||||
|
# endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define BITMASK_W unsigned long int
|
||||||
|
#define BITMASK_W_LEN (sizeof(BITMASK_W)*CHAR_BIT)
|
||||||
|
#define BITMASK_W_MASK (BITMASK_W_LEN - 1)
|
||||||
|
#define BITMASK_N(n) ((BITMASK_W)1 << (n))
|
||||||
|
|
||||||
|
typedef struct bitmask
|
||||||
|
{
|
||||||
|
int w,h;
|
||||||
|
BITMASK_W bits[1];
|
||||||
|
} bitmask_t;
|
||||||
|
|
||||||
|
/* Creates a bitmask of width w and height h, where
|
||||||
|
w and h must both be greater than or equal to 0.
|
||||||
|
The mask is automatically cleared when created.
|
||||||
|
*/
|
||||||
|
bitmask_t *bitmask_create(int w, int h);
|
||||||
|
|
||||||
|
/* Frees all the memory allocated by bitmask_create for m. */
|
||||||
|
void bitmask_free(bitmask_t *m);
|
||||||
|
|
||||||
|
/* Clears all bits in the mask */
|
||||||
|
void bitmask_clear(bitmask_t *m);
|
||||||
|
|
||||||
|
/* Sets all bits in the mask */
|
||||||
|
void bitmask_fill(bitmask_t *m);
|
||||||
|
|
||||||
|
/* Flips all bits in the mask */
|
||||||
|
void bitmask_invert(bitmask_t *m);
|
||||||
|
|
||||||
|
/* Counts the bits in the mask */
|
||||||
|
unsigned int bitmask_count(bitmask_t *m);
|
||||||
|
|
||||||
|
/* Returns nonzero if the bit at (x,y) is set. Coordinates start at
|
||||||
|
(0,0) */
|
||||||
|
static INLINE int bitmask_getbit(const bitmask_t *m, int x, int y)
|
||||||
|
{
|
||||||
|
return (m->bits[x/BITMASK_W_LEN*m->h + y] & BITMASK_N(x & BITMASK_W_MASK)) != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Sets the bit at (x,y) */
|
||||||
|
static INLINE void bitmask_setbit(bitmask_t *m, int x, int y)
|
||||||
|
{
|
||||||
|
m->bits[x/BITMASK_W_LEN*m->h + y] |= BITMASK_N(x & BITMASK_W_MASK);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Clears the bit at (x,y) */
|
||||||
|
static INLINE void bitmask_clearbit(bitmask_t *m, int x, int y)
|
||||||
|
{
|
||||||
|
m->bits[x/BITMASK_W_LEN*m->h + y] &= ~BITMASK_N(x & BITMASK_W_MASK);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Returns nonzero if the masks overlap with the given offset.
|
||||||
|
The overlap tests uses the following offsets (which may be negative):
|
||||||
|
|
||||||
|
+----+----------..
|
||||||
|
|A | yoffset
|
||||||
|
| +-+----------..
|
||||||
|
+--|B
|
||||||
|
|xoffset
|
||||||
|
| |
|
||||||
|
: :
|
||||||
|
*/
|
||||||
|
int bitmask_overlap(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
/* Like bitmask_overlap(), but will also give a point of intersection.
|
||||||
|
x and y are given in the coordinates of mask a, and are untouched
|
||||||
|
if there is no overlap. */
|
||||||
|
int bitmask_overlap_pos(const bitmask_t *a, const bitmask_t *b,
|
||||||
|
int xoffset, int yoffset, int *x, int *y);
|
||||||
|
|
||||||
|
/* Returns the number of overlapping 'pixels' */
|
||||||
|
int bitmask_overlap_area(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
/* Fills a mask with the overlap of two other masks. A bitwise AND. */
|
||||||
|
void bitmask_overlap_mask (const bitmask_t *a, const bitmask_t *b, bitmask_t *c, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
/* Draws mask b onto mask a (bitwise OR). Can be used to compose large
|
||||||
|
(game background?) mask from several submasks, which may speed up
|
||||||
|
the testing. */
|
||||||
|
|
||||||
|
void bitmask_draw(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
void bitmask_erase(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
/* Return a new scaled bitmask, with dimensions w*h. The quality of the
|
||||||
|
scaling may not be perfect for all circumstances, but it should
|
||||||
|
be reasonable. If either w or h is 0 a clear 1x1 mask is returned. */
|
||||||
|
bitmask_t *bitmask_scale(const bitmask_t *m, int w, int h);
|
||||||
|
|
||||||
|
/* Convolve b into a, drawing the output into o, shifted by offset. If offset
|
||||||
|
* is 0, then the (x,y) bit will be set if and only if
|
||||||
|
* bitmask_overlap(a, b, x - b->w - 1, y - b->h - 1) returns true.
|
||||||
|
*
|
||||||
|
* Modifies bits o[xoffset ... xoffset + a->w + b->w - 1)
|
||||||
|
* [yoffset ... yoffset + a->h + b->h - 1). */
|
||||||
|
void bitmask_convolve(const bitmask_t *a, const bitmask_t *b, bitmask_t *o, int xoffset, int yoffset);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
} /* End of extern "C" { */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
201
venv/Include/site/python3.7/pygame/camera.h
Normal file
201
venv/Include/site/python3.7/pygame/camera.h
Normal file
@ -0,0 +1,201 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "pygame.h"
|
||||||
|
#include "doc/camera_doc.h"
|
||||||
|
|
||||||
|
#if defined(__unix__)
|
||||||
|
#include <structmember.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <assert.h>
|
||||||
|
|
||||||
|
#include <fcntl.h> /* low-level i/o */
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/time.h>
|
||||||
|
#include <sys/mman.h>
|
||||||
|
#include <sys/ioctl.h>
|
||||||
|
|
||||||
|
/* on freebsd there is no asm/types */
|
||||||
|
#ifdef linux
|
||||||
|
#include <asm/types.h> /* for videodev2.h */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <linux/videodev2.h>
|
||||||
|
#elif defined(__APPLE__)
|
||||||
|
#include <AvailabilityMacros.h>
|
||||||
|
/* We support OSX 10.6 and below. */
|
||||||
|
#if __MAC_OS_X_VERSION_MAX_ALLOWED <= 1060
|
||||||
|
#define PYGAME_MAC_CAMERA_OLD 1
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(PYGAME_MAC_CAMERA_OLD)
|
||||||
|
#include <QuickTime/QuickTime.h>
|
||||||
|
#include <QuickTime/Movies.h>
|
||||||
|
#include <QuickTime/ImageCompression.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* some constants used which are not defined on non-v4l machines. */
|
||||||
|
#ifndef V4L2_PIX_FMT_RGB24
|
||||||
|
#define V4L2_PIX_FMT_RGB24 'RGB3'
|
||||||
|
#endif
|
||||||
|
#ifndef V4L2_PIX_FMT_RGB444
|
||||||
|
#define V4L2_PIX_FMT_RGB444 'R444'
|
||||||
|
#endif
|
||||||
|
#ifndef V4L2_PIX_FMT_YUYV
|
||||||
|
#define V4L2_PIX_FMT_YUYV 'YUYV'
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define CLEAR(x) memset (&(x), 0, sizeof (x))
|
||||||
|
#define SAT(c) if (c & (~255)) { if (c < 0) c = 0; else c = 255; }
|
||||||
|
#define SAT2(c) ((c) & (~255) ? ((c) < 0 ? 0 : 255) : (c))
|
||||||
|
#define DEFAULT_WIDTH 640
|
||||||
|
#define DEFAULT_HEIGHT 480
|
||||||
|
#define RGB_OUT 1
|
||||||
|
#define YUV_OUT 2
|
||||||
|
#define HSV_OUT 4
|
||||||
|
#define CAM_V4L 1 /* deprecated. the incomplete support in pygame was removed */
|
||||||
|
#define CAM_V4L2 2
|
||||||
|
|
||||||
|
struct buffer {
|
||||||
|
void * start;
|
||||||
|
size_t length;
|
||||||
|
};
|
||||||
|
|
||||||
|
#if defined(__unix__)
|
||||||
|
typedef struct pgCameraObject {
|
||||||
|
PyObject_HEAD
|
||||||
|
char* device_name;
|
||||||
|
int camera_type;
|
||||||
|
unsigned long pixelformat;
|
||||||
|
unsigned int color_out;
|
||||||
|
struct buffer* buffers;
|
||||||
|
unsigned int n_buffers;
|
||||||
|
int width;
|
||||||
|
int height;
|
||||||
|
int size;
|
||||||
|
int hflip;
|
||||||
|
int vflip;
|
||||||
|
int brightness;
|
||||||
|
int fd;
|
||||||
|
} pgCameraObject;
|
||||||
|
#elif defined(PYGAME_MAC_CAMERA_OLD)
|
||||||
|
typedef struct pgCameraObject {
|
||||||
|
PyObject_HEAD
|
||||||
|
char* device_name; /* unieke name of the device */
|
||||||
|
OSType pixelformat;
|
||||||
|
unsigned int color_out;
|
||||||
|
SeqGrabComponent component; /* A type used by the Sequence Grabber API */
|
||||||
|
SGChannel channel; /* Channel of the Sequence Grabber */
|
||||||
|
GWorldPtr gworld; /* Pointer to the struct that holds the data of the captured image */
|
||||||
|
Rect boundsRect; /* bounds of the image frame */
|
||||||
|
long size; /* size of the image in our buffer to draw */
|
||||||
|
int hflip;
|
||||||
|
int vflip;
|
||||||
|
short depth;
|
||||||
|
struct buffer pixels;
|
||||||
|
//struct buffer tmp_pixels /* place where the flipped image in temporarly stored if hflip or vflip is true.*/
|
||||||
|
} pgCameraObject;
|
||||||
|
|
||||||
|
#else
|
||||||
|
/* generic definition.
|
||||||
|
*/
|
||||||
|
|
||||||
|
typedef struct pgCameraObject {
|
||||||
|
PyObject_HEAD
|
||||||
|
char* device_name;
|
||||||
|
int camera_type;
|
||||||
|
unsigned long pixelformat;
|
||||||
|
unsigned int color_out;
|
||||||
|
struct buffer* buffers;
|
||||||
|
unsigned int n_buffers;
|
||||||
|
int width;
|
||||||
|
int height;
|
||||||
|
int size;
|
||||||
|
int hflip;
|
||||||
|
int vflip;
|
||||||
|
int brightness;
|
||||||
|
int fd;
|
||||||
|
} pgCameraObject;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* internal functions for colorspace conversion */
|
||||||
|
void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace);
|
||||||
|
void rgb24_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void rgb444_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void rgb_to_yuv (const void* src, void* dst, int length,
|
||||||
|
unsigned long source, SDL_PixelFormat* format);
|
||||||
|
void rgb_to_hsv (const void* src, void* dst, int length,
|
||||||
|
unsigned long source, SDL_PixelFormat* format);
|
||||||
|
void yuyv_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void yuyv_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void uyvy_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void uyvy_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format);
|
||||||
|
void sbggr8_to_rgb (const void* src, void* dst, int width, int height,
|
||||||
|
SDL_PixelFormat* format);
|
||||||
|
void yuv420_to_rgb (const void* src, void* dst, int width, int height,
|
||||||
|
SDL_PixelFormat* format);
|
||||||
|
void yuv420_to_yuv (const void* src, void* dst, int width, int height,
|
||||||
|
SDL_PixelFormat* format);
|
||||||
|
|
||||||
|
#if defined(__unix__)
|
||||||
|
/* internal functions specific to v4l2 */
|
||||||
|
char** v4l2_list_cameras (int* num_devices);
|
||||||
|
int v4l2_get_control (int fd, int id, int *value);
|
||||||
|
int v4l2_set_control (int fd, int id, int value);
|
||||||
|
PyObject* v4l2_read_raw (pgCameraObject* self);
|
||||||
|
int v4l2_xioctl (int fd, int request, void *arg);
|
||||||
|
int v4l2_process_image (pgCameraObject* self, const void *image,
|
||||||
|
unsigned int buffer_size, SDL_Surface* surf);
|
||||||
|
int v4l2_query_buffer (pgCameraObject* self);
|
||||||
|
int v4l2_read_frame (pgCameraObject* self, SDL_Surface* surf);
|
||||||
|
int v4l2_stop_capturing (pgCameraObject* self);
|
||||||
|
int v4l2_start_capturing (pgCameraObject* self);
|
||||||
|
int v4l2_uninit_device (pgCameraObject* self);
|
||||||
|
int v4l2_init_mmap (pgCameraObject* self);
|
||||||
|
int v4l2_init_device (pgCameraObject* self);
|
||||||
|
int v4l2_close_device (pgCameraObject* self);
|
||||||
|
int v4l2_open_device (pgCameraObject* self);
|
||||||
|
|
||||||
|
#elif defined(PYGAME_MAC_CAMERA_OLD)
|
||||||
|
/* internal functions specific to mac */
|
||||||
|
char** mac_list_cameras(int* num_devices);
|
||||||
|
int mac_open_device (pgCameraObject* self);
|
||||||
|
int mac_init_device(pgCameraObject* self);
|
||||||
|
int mac_close_device (pgCameraObject* self);
|
||||||
|
int mac_start_capturing(pgCameraObject* self);
|
||||||
|
int mac_stop_capturing (pgCameraObject* self);
|
||||||
|
|
||||||
|
int mac_get_control(pgCameraObject* self, int id, int* value);
|
||||||
|
int mac_set_control(pgCameraObject* self, int id, int value);
|
||||||
|
|
||||||
|
PyObject* mac_read_raw(pgCameraObject *self);
|
||||||
|
int mac_read_frame(pgCameraObject* self, SDL_Surface* surf);
|
||||||
|
int mac_camera_idle(pgCameraObject* self);
|
||||||
|
int mac_copy_gworld_to_surface(pgCameraObject* self, SDL_Surface* surf);
|
||||||
|
|
||||||
|
void flip_image(const void* image, void* flipped_image, int width, int height,
|
||||||
|
short depth, int hflip, int vflip);
|
||||||
|
|
||||||
|
#endif
|
48
venv/Include/site/python3.7/pygame/fastevents.h
Normal file
48
venv/Include/site/python3.7/pygame/fastevents.h
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
#ifndef _FASTEVENTS_H_
|
||||||
|
#define _FASTEVENTS_H_
|
||||||
|
/*
|
||||||
|
NET2 is a threaded, event based, network IO library for SDL.
|
||||||
|
Copyright (C) 2002 Bob Pendleton
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Lesser General Public License
|
||||||
|
as published by the Free Software Foundation; either version 2.1
|
||||||
|
of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Lesser General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Lesser General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
|
||||||
|
02111-1307 USA
|
||||||
|
|
||||||
|
If you do not wish to comply with the terms of the LGPL please
|
||||||
|
contact the author as other terms are available for a fee.
|
||||||
|
|
||||||
|
Bob Pendleton
|
||||||
|
Bob@Pendleton.com
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "SDL.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
int FE_Init(void); // Initialize FE
|
||||||
|
void FE_Quit(void); // shutdown FE
|
||||||
|
|
||||||
|
void FE_PumpEvents(void); // replacement for SDL_PumpEvents
|
||||||
|
int FE_PollEvent(SDL_Event *event); // replacement for SDL_PollEvent
|
||||||
|
int FE_WaitEvent(SDL_Event *event); // replacement for SDL_WaitEvent
|
||||||
|
int FE_PushEvent(SDL_Event *event); // replacement for SDL_PushEvent
|
||||||
|
|
||||||
|
char *FE_GetError(void); // get the last error
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
57
venv/Include/site/python3.7/pygame/font.h
Normal file
57
venv/Include/site/python3.7/pygame/font.h
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
#if defined(HAVE_SNPRINTF) /* also defined in SDL_ttf (SDL.h) */
|
||||||
|
#undef HAVE_SNPRINTF /* remove GCC macro redefine warning */
|
||||||
|
#endif
|
||||||
|
#include <SDL_ttf.h>
|
||||||
|
|
||||||
|
|
||||||
|
/* test font initialization */
|
||||||
|
#define FONT_INIT_CHECK() \
|
||||||
|
if(!(*(int*)PyFONT_C_API[2])) \
|
||||||
|
return RAISE(pgExc_SDLError, "font system not initialized")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#define PYGAMEAPI_FONT_FIRSTSLOT 0
|
||||||
|
#define PYGAMEAPI_FONT_NUMSLOTS 3
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
TTF_Font* font;
|
||||||
|
PyObject* weakreflist;
|
||||||
|
} PyFontObject;
|
||||||
|
#define PyFont_AsFont(x) (((PyFontObject*)x)->font)
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_FONT_INTERNAL
|
||||||
|
#define PyFont_Check(x) ((x)->ob_type == (PyTypeObject*)PyFONT_C_API[0])
|
||||||
|
#define PyFont_Type (*(PyTypeObject*)PyFONT_C_API[0])
|
||||||
|
#define PyFont_New (*(PyObject*(*)(TTF_Font*))PyFONT_C_API[1])
|
||||||
|
/*slot 2 taken by FONT_INIT_CHECK*/
|
||||||
|
|
||||||
|
#define import_pygame_font() \
|
||||||
|
_IMPORT_PYGAME_MODULE(font, FONT, PyFONT_C_API)
|
||||||
|
|
||||||
|
static void* PyFONT_C_API[PYGAMEAPI_FONT_NUMSLOTS] = {NULL};
|
||||||
|
#endif
|
||||||
|
|
137
venv/Include/site/python3.7/pygame/freetype.h
Normal file
137
venv/Include/site/python3.7/pygame/freetype.h
Normal file
@ -0,0 +1,137 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2009 Vicent Marti
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
*/
|
||||||
|
#ifndef _PYGAME_FREETYPE_H_
|
||||||
|
#define _PYGAME_FREETYPE_H_
|
||||||
|
|
||||||
|
#define PGFT_PYGAME1_COMPAT
|
||||||
|
#define HAVE_PYGAME_SDL_VIDEO
|
||||||
|
#define HAVE_PYGAME_SDL_RWOPS
|
||||||
|
|
||||||
|
#include "pygame.h"
|
||||||
|
#include "pgcompat.h"
|
||||||
|
|
||||||
|
#if PY3
|
||||||
|
# define IS_PYTHON_3
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <ft2build.h>
|
||||||
|
#include FT_FREETYPE_H
|
||||||
|
#include FT_CACHE_H
|
||||||
|
#include FT_XFREE86_H
|
||||||
|
#include FT_TRIGONOMETRY_H
|
||||||
|
|
||||||
|
/**********************************************************
|
||||||
|
* Global module constants
|
||||||
|
**********************************************************/
|
||||||
|
|
||||||
|
/* Render styles */
|
||||||
|
#define FT_STYLE_NORMAL 0x00
|
||||||
|
#define FT_STYLE_STRONG 0x01
|
||||||
|
#define FT_STYLE_OBLIQUE 0x02
|
||||||
|
#define FT_STYLE_UNDERLINE 0x04
|
||||||
|
#define FT_STYLE_WIDE 0x08
|
||||||
|
#define FT_STYLE_DEFAULT 0xFF
|
||||||
|
|
||||||
|
/* Bounding box modes */
|
||||||
|
#define FT_BBOX_EXACT FT_GLYPH_BBOX_SUBPIXELS
|
||||||
|
#define FT_BBOX_EXACT_GRIDFIT FT_GLYPH_BBOX_GRIDFIT
|
||||||
|
#define FT_BBOX_PIXEL FT_GLYPH_BBOX_TRUNCATE
|
||||||
|
#define FT_BBOX_PIXEL_GRIDFIT FT_GLYPH_BBOX_PIXELS
|
||||||
|
|
||||||
|
/* Rendering flags */
|
||||||
|
#define FT_RFLAG_NONE (0)
|
||||||
|
#define FT_RFLAG_ANTIALIAS (1 << 0)
|
||||||
|
#define FT_RFLAG_AUTOHINT (1 << 1)
|
||||||
|
#define FT_RFLAG_VERTICAL (1 << 2)
|
||||||
|
#define FT_RFLAG_HINTED (1 << 3)
|
||||||
|
#define FT_RFLAG_KERNING (1 << 4)
|
||||||
|
#define FT_RFLAG_TRANSFORM (1 << 5)
|
||||||
|
#define FT_RFLAG_PAD (1 << 6)
|
||||||
|
#define FT_RFLAG_ORIGIN (1 << 7)
|
||||||
|
#define FT_RFLAG_UCS4 (1 << 8)
|
||||||
|
#define FT_RFLAG_USE_BITMAP_STRIKES (1 << 9)
|
||||||
|
#define FT_RFLAG_DEFAULTS (FT_RFLAG_HINTED | \
|
||||||
|
FT_RFLAG_USE_BITMAP_STRIKES | \
|
||||||
|
FT_RFLAG_ANTIALIAS)
|
||||||
|
|
||||||
|
|
||||||
|
#define FT_RENDER_NEWBYTEARRAY 0x0
|
||||||
|
#define FT_RENDER_NEWSURFACE 0x1
|
||||||
|
#define FT_RENDER_EXISTINGSURFACE 0x2
|
||||||
|
|
||||||
|
/**********************************************************
|
||||||
|
* Global module types
|
||||||
|
**********************************************************/
|
||||||
|
|
||||||
|
typedef struct _scale_s {
|
||||||
|
FT_UInt x, y;
|
||||||
|
} Scale_t;
|
||||||
|
typedef FT_Angle Angle_t;
|
||||||
|
|
||||||
|
struct fontinternals_;
|
||||||
|
struct freetypeinstance_;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
FT_Long font_index;
|
||||||
|
FT_Open_Args open_args;
|
||||||
|
} pgFontId;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
pgFontId id;
|
||||||
|
PyObject *path;
|
||||||
|
int is_scalable;
|
||||||
|
|
||||||
|
Scale_t face_size;
|
||||||
|
FT_Int16 style;
|
||||||
|
FT_Int16 render_flags;
|
||||||
|
double strength;
|
||||||
|
double underline_adjustment;
|
||||||
|
FT_UInt resolution;
|
||||||
|
Angle_t rotation;
|
||||||
|
FT_Matrix transform;
|
||||||
|
FT_Byte fgcolor[4];
|
||||||
|
|
||||||
|
struct freetypeinstance_ *freetype; /* Personal reference */
|
||||||
|
struct fontinternals_ *_internals;
|
||||||
|
} pgFontObject;
|
||||||
|
|
||||||
|
#define pgFont_IS_ALIVE(o) \
|
||||||
|
(((pgFontObject *)(o))->_internals != 0)
|
||||||
|
|
||||||
|
/**********************************************************
|
||||||
|
* Module declaration
|
||||||
|
**********************************************************/
|
||||||
|
#define PYGAMEAPI_FREETYPE_FIRSTSLOT 0
|
||||||
|
#define PYGAMEAPI_FREETYPE_NUMSLOTS 2
|
||||||
|
|
||||||
|
#ifndef PYGAME_FREETYPE_INTERNAL
|
||||||
|
|
||||||
|
#define pgFont_Check(x) ((x)->ob_type == (PyTypeObject*)PgFREETYPE_C_API[0])
|
||||||
|
#define pgFont_Type (*(PyTypeObject*)PgFREETYPE_C_API[1])
|
||||||
|
#define pgFont_New (*(PyObject*(*)(const char*, long))PgFREETYPE_C_API[1])
|
||||||
|
|
||||||
|
#define import_pygame_freetype() \
|
||||||
|
_IMPORT_PYGAME_MODULE(freetype, FREETYPE, PgFREETYPE_C_API)
|
||||||
|
|
||||||
|
static void *PgFREETYPE_C_API[PYGAMEAPI_FREETYPE_NUMSLOTS] = {0};
|
||||||
|
#endif /* PYGAME_FREETYPE_INTERNAL */
|
||||||
|
|
||||||
|
#endif /* _PYGAME_FREETYPE_H_ */
|
25
venv/Include/site/python3.7/pygame/mask.h
Normal file
25
venv/Include/site/python3.7/pygame/mask.h
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
#include <Python.h>
|
||||||
|
#include "bitmask.h"
|
||||||
|
|
||||||
|
#define PYGAMEAPI_MASK_FIRSTSLOT 0
|
||||||
|
#define PYGAMEAPI_MASK_NUMSLOTS 1
|
||||||
|
#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API"
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
bitmask_t *mask;
|
||||||
|
} pgMaskObject;
|
||||||
|
|
||||||
|
#define pgMask_AsBitmap(x) (((pgMaskObject*)x)->mask)
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_MASK_INTERNAL
|
||||||
|
|
||||||
|
#define pgMask_Type (*(PyTypeObject*)PyMASK_C_API[0])
|
||||||
|
#define pgMask_Check(x) ((x)->ob_type == &pgMask_Type)
|
||||||
|
|
||||||
|
#define import_pygame_mask() \
|
||||||
|
_IMPORT_PYGAME_MODULE(mask, MASK, PyMASK_C_API)
|
||||||
|
|
||||||
|
static void* PyMASK_C_API[PYGAMEAPI_MASK_NUMSLOTS] = {NULL};
|
||||||
|
#endif /* #ifndef PYGAMEAPI_MASK_INTERNAL */
|
||||||
|
|
65
venv/Include/site/python3.7/pygame/mixer.h
Normal file
65
venv/Include/site/python3.7/pygame/mixer.h
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
#include <SDL_mixer.h>
|
||||||
|
#include <structmember.h>
|
||||||
|
|
||||||
|
|
||||||
|
/* test mixer initializations */
|
||||||
|
#define MIXER_INIT_CHECK() \
|
||||||
|
if(!SDL_WasInit(SDL_INIT_AUDIO)) \
|
||||||
|
return RAISE(pgExc_SDLError, "mixer not initialized")
|
||||||
|
|
||||||
|
|
||||||
|
#define PYGAMEAPI_MIXER_FIRSTSLOT 0
|
||||||
|
#define PYGAMEAPI_MIXER_NUMSLOTS 7
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
Mix_Chunk *chunk;
|
||||||
|
Uint8 *mem;
|
||||||
|
PyObject *weakreflist;
|
||||||
|
} pgSoundObject;
|
||||||
|
typedef struct {
|
||||||
|
PyObject_HEAD
|
||||||
|
int chan;
|
||||||
|
} pgChannelObject;
|
||||||
|
#define pgSound_AsChunk(x) (((pgSoundObject*)x)->chunk)
|
||||||
|
#define pgChannel_AsInt(x) (((pgChannelObject*)x)->chan)
|
||||||
|
|
||||||
|
#ifndef PYGAMEAPI_MIXER_INTERNAL
|
||||||
|
#define pgSound_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[0])
|
||||||
|
#define pgSound_Type (*(PyTypeObject*)pgMIXER_C_API[0])
|
||||||
|
#define pgSound_New (*(PyObject*(*)(Mix_Chunk*))pgMIXER_C_API[1])
|
||||||
|
#define pgSound_Play (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[2])
|
||||||
|
#define pgChannel_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[3])
|
||||||
|
#define pgChannel_Type (*(PyTypeObject*)pgMIXER_C_API[3])
|
||||||
|
#define pgChannel_New (*(PyObject*(*)(int))pgMIXER_C_API[4])
|
||||||
|
#define pgMixer_AutoInit (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[5])
|
||||||
|
#define pgMixer_AutoQuit (*(void(*)(void))pgMIXER_C_API[6])
|
||||||
|
|
||||||
|
#define import_pygame_mixer() \
|
||||||
|
_IMPORT_PYGAME_MODULE(mixer, MIXER, pgMIXER_C_API)
|
||||||
|
|
||||||
|
static void* pgMIXER_C_API[PYGAMEAPI_MIXER_NUMSLOTS] = {NULL};
|
||||||
|
#endif
|
||||||
|
|
123
venv/Include/site/python3.7/pygame/palette.h
Normal file
123
venv/Include/site/python3.7/pygame/palette.h
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef PALETTE_H
|
||||||
|
#define PALETTE_H
|
||||||
|
|
||||||
|
#include <SDL.h>
|
||||||
|
|
||||||
|
/* SDL 2 does not assign a default palette color scheme to a new 8 bit
|
||||||
|
* surface. Instead, the palette is set all white. This defines the SDL 1.2
|
||||||
|
* default palette.
|
||||||
|
*/
|
||||||
|
static const SDL_Color default_palette_colors[] = {
|
||||||
|
{0, 0, 0, 255}, {0, 0, 85, 255}, {0, 0, 170, 255},
|
||||||
|
{0, 0, 255, 255}, {0, 36, 0, 255}, {0, 36, 85, 255},
|
||||||
|
{0, 36, 170, 255}, {0, 36, 255, 255}, {0, 73, 0, 255},
|
||||||
|
{0, 73, 85, 255}, {0, 73, 170, 255}, {0, 73, 255, 255},
|
||||||
|
{0, 109, 0, 255}, {0, 109, 85, 255}, {0, 109, 170, 255},
|
||||||
|
{0, 109, 255, 255}, {0, 146, 0, 255}, {0, 146, 85, 255},
|
||||||
|
{0, 146, 170, 255}, {0, 146, 255, 255}, {0, 182, 0, 255},
|
||||||
|
{0, 182, 85, 255}, {0, 182, 170, 255}, {0, 182, 255, 255},
|
||||||
|
{0, 219, 0, 255}, {0, 219, 85, 255}, {0, 219, 170, 255},
|
||||||
|
{0, 219, 255, 255}, {0, 255, 0, 255}, {0, 255, 85, 255},
|
||||||
|
{0, 255, 170, 255}, {0, 255, 255, 255}, {85, 0, 0, 255},
|
||||||
|
{85, 0, 85, 255}, {85, 0, 170, 255}, {85, 0, 255, 255},
|
||||||
|
{85, 36, 0, 255}, {85, 36, 85, 255}, {85, 36, 170, 255},
|
||||||
|
{85, 36, 255, 255}, {85, 73, 0, 255}, {85, 73, 85, 255},
|
||||||
|
{85, 73, 170, 255}, {85, 73, 255, 255}, {85, 109, 0, 255},
|
||||||
|
{85, 109, 85, 255}, {85, 109, 170, 255}, {85, 109, 255, 255},
|
||||||
|
{85, 146, 0, 255}, {85, 146, 85, 255}, {85, 146, 170, 255},
|
||||||
|
{85, 146, 255, 255}, {85, 182, 0, 255}, {85, 182, 85, 255},
|
||||||
|
{85, 182, 170, 255}, {85, 182, 255, 255}, {85, 219, 0, 255},
|
||||||
|
{85, 219, 85, 255}, {85, 219, 170, 255}, {85, 219, 255, 255},
|
||||||
|
{85, 255, 0, 255}, {85, 255, 85, 255}, {85, 255, 170, 255},
|
||||||
|
{85, 255, 255, 255}, {170, 0, 0, 255}, {170, 0, 85, 255},
|
||||||
|
{170, 0, 170, 255}, {170, 0, 255, 255}, {170, 36, 0, 255},
|
||||||
|
{170, 36, 85, 255}, {170, 36, 170, 255}, {170, 36, 255, 255},
|
||||||
|
{170, 73, 0, 255}, {170, 73, 85, 255}, {170, 73, 170, 255},
|
||||||
|
{170, 73, 255, 255}, {170, 109, 0, 255}, {170, 109, 85, 255},
|
||||||
|
{170, 109, 170, 255}, {170, 109, 255, 255}, {170, 146, 0, 255},
|
||||||
|
{170, 146, 85, 255}, {170, 146, 170, 255}, {170, 146, 255, 255},
|
||||||
|
{170, 182, 0, 255}, {170, 182, 85, 255}, {170, 182, 170, 255},
|
||||||
|
{170, 182, 255, 255}, {170, 219, 0, 255}, {170, 219, 85, 255},
|
||||||
|
{170, 219, 170, 255}, {170, 219, 255, 255}, {170, 255, 0, 255},
|
||||||
|
{170, 255, 85, 255}, {170, 255, 170, 255}, {170, 255, 255, 255},
|
||||||
|
{255, 0, 0, 255}, {255, 0, 85, 255}, {255, 0, 170, 255},
|
||||||
|
{255, 0, 255, 255}, {255, 36, 0, 255}, {255, 36, 85, 255},
|
||||||
|
{255, 36, 170, 255}, {255, 36, 255, 255}, {255, 73, 0, 255},
|
||||||
|
{255, 73, 85, 255}, {255, 73, 170, 255}, {255, 73, 255, 255},
|
||||||
|
{255, 109, 0, 255}, {255, 109, 85, 255}, {255, 109, 170, 255},
|
||||||
|
{255, 109, 255, 255}, {255, 146, 0, 255}, {255, 146, 85, 255},
|
||||||
|
{255, 146, 170, 255}, {255, 146, 255, 255}, {255, 182, 0, 255},
|
||||||
|
{255, 182, 85, 255}, {255, 182, 170, 255}, {255, 182, 255, 255},
|
||||||
|
{255, 219, 0, 255}, {255, 219, 85, 255}, {255, 219, 170, 255},
|
||||||
|
{255, 219, 255, 255}, {255, 255, 0, 255}, {255, 255, 85, 255},
|
||||||
|
{255, 255, 170, 255}, {255, 255, 255, 255}, {0, 0, 0, 255},
|
||||||
|
{0, 0, 85, 255}, {0, 0, 170, 255}, {0, 0, 255, 255},
|
||||||
|
{0, 36, 0, 255}, {0, 36, 85, 255}, {0, 36, 170, 255},
|
||||||
|
{0, 36, 255, 255}, {0, 73, 0, 255}, {0, 73, 85, 255},
|
||||||
|
{0, 73, 170, 255}, {0, 73, 255, 255}, {0, 109, 0, 255},
|
||||||
|
{0, 109, 85, 255}, {0, 109, 170, 255}, {0, 109, 255, 255},
|
||||||
|
{0, 146, 0, 255}, {0, 146, 85, 255}, {0, 146, 170, 255},
|
||||||
|
{0, 146, 255, 255}, {0, 182, 0, 255}, {0, 182, 85, 255},
|
||||||
|
{0, 182, 170, 255}, {0, 182, 255, 255}, {0, 219, 0, 255},
|
||||||
|
{0, 219, 85, 255}, {0, 219, 170, 255}, {0, 219, 255, 255},
|
||||||
|
{0, 255, 0, 255}, {0, 255, 85, 255}, {0, 255, 170, 255},
|
||||||
|
{0, 255, 255, 255}, {85, 0, 0, 255}, {85, 0, 85, 255},
|
||||||
|
{85, 0, 170, 255}, {85, 0, 255, 255}, {85, 36, 0, 255},
|
||||||
|
{85, 36, 85, 255}, {85, 36, 170, 255}, {85, 36, 255, 255},
|
||||||
|
{85, 73, 0, 255}, {85, 73, 85, 255}, {85, 73, 170, 255},
|
||||||
|
{85, 73, 255, 255}, {85, 109, 0, 255}, {85, 109, 85, 255},
|
||||||
|
{85, 109, 170, 255}, {85, 109, 255, 255}, {85, 146, 0, 255},
|
||||||
|
{85, 146, 85, 255}, {85, 146, 170, 255}, {85, 146, 255, 255},
|
||||||
|
{85, 182, 0, 255}, {85, 182, 85, 255}, {85, 182, 170, 255},
|
||||||
|
{85, 182, 255, 255}, {85, 219, 0, 255}, {85, 219, 85, 255},
|
||||||
|
{85, 219, 170, 255}, {85, 219, 255, 255}, {85, 255, 0, 255},
|
||||||
|
{85, 255, 85, 255}, {85, 255, 170, 255}, {85, 255, 255, 255},
|
||||||
|
{170, 0, 0, 255}, {170, 0, 85, 255}, {170, 0, 170, 255},
|
||||||
|
{170, 0, 255, 255}, {170, 36, 0, 255}, {170, 36, 85, 255},
|
||||||
|
{170, 36, 170, 255}, {170, 36, 255, 255}, {170, 73, 0, 255},
|
||||||
|
{170, 73, 85, 255}, {170, 73, 170, 255}, {170, 73, 255, 255},
|
||||||
|
{170, 109, 0, 255}, {170, 109, 85, 255}, {170, 109, 170, 255},
|
||||||
|
{170, 109, 255, 255}, {170, 146, 0, 255}, {170, 146, 85, 255},
|
||||||
|
{170, 146, 170, 255}, {170, 146, 255, 255}, {170, 182, 0, 255},
|
||||||
|
{170, 182, 85, 255}, {170, 182, 170, 255}, {170, 182, 255, 255},
|
||||||
|
{170, 219, 0, 255}, {170, 219, 85, 255}, {170, 219, 170, 255},
|
||||||
|
{170, 219, 255, 255}, {170, 255, 0, 255}, {170, 255, 85, 255},
|
||||||
|
{170, 255, 170, 255}, {170, 255, 255, 255}, {255, 0, 0, 255},
|
||||||
|
{255, 0, 85, 255}, {255, 0, 170, 255}, {255, 0, 255, 255},
|
||||||
|
{255, 36, 0, 255}, {255, 36, 85, 255}, {255, 36, 170, 255},
|
||||||
|
{255, 36, 255, 255}, {255, 73, 0, 255}, {255, 73, 85, 255},
|
||||||
|
{255, 73, 170, 255}, {255, 73, 255, 255}, {255, 109, 0, 255},
|
||||||
|
{255, 109, 85, 255}, {255, 109, 170, 255}, {255, 109, 255, 255},
|
||||||
|
{255, 146, 0, 255}, {255, 146, 85, 255}, {255, 146, 170, 255},
|
||||||
|
{255, 146, 255, 255}, {255, 182, 0, 255}, {255, 182, 85, 255},
|
||||||
|
{255, 182, 170, 255}, {255, 182, 255, 255}, {255, 219, 0, 255},
|
||||||
|
{255, 219, 85, 255}, {255, 219, 170, 255}, {255, 219, 255, 255},
|
||||||
|
{255, 255, 0, 255}, {255, 255, 85, 255}, {255, 255, 170, 255},
|
||||||
|
{255, 255, 255, 255}};
|
||||||
|
|
||||||
|
static const int default_palette_size =
|
||||||
|
(int)(sizeof(default_palette_colors) / sizeof(SDL_Color));
|
||||||
|
|
||||||
|
#endif
|
26
venv/Include/site/python3.7/pygame/pgarrinter.h
Normal file
26
venv/Include/site/python3.7/pygame/pgarrinter.h
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
/* array structure interface version 3 declarations */
|
||||||
|
|
||||||
|
#if !defined(PG_ARRAYINTER_HEADER)
|
||||||
|
#define PG_ARRAYINTER_HEADER
|
||||||
|
|
||||||
|
static const int PAI_CONTIGUOUS = 0x01;
|
||||||
|
static const int PAI_FORTRAN = 0x02;
|
||||||
|
static const int PAI_ALIGNED = 0x100;
|
||||||
|
static const int PAI_NOTSWAPPED = 0x200;
|
||||||
|
static const int PAI_WRITEABLE = 0x400;
|
||||||
|
static const int PAI_ARR_HAS_DESCR = 0x800;
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
int two; /* contains the integer 2 -- simple sanity check */
|
||||||
|
int nd; /* number of dimensions */
|
||||||
|
char typekind; /* kind in array -- character code of typestr */
|
||||||
|
int itemsize; /* size of each element */
|
||||||
|
int flags; /* flags indicating how the data should be */
|
||||||
|
/* interpreted */
|
||||||
|
Py_intptr_t *shape; /* A length-nd array of shape information */
|
||||||
|
Py_intptr_t *strides; /* A length-nd array of stride information */
|
||||||
|
void *data; /* A pointer to the first element of the array */
|
||||||
|
PyObject *descr; /* NULL or a data-description */
|
||||||
|
} PyArrayInterface;
|
||||||
|
|
||||||
|
#endif
|
52
venv/Include/site/python3.7/pygame/pgbufferproxy.h
Normal file
52
venv/Include/site/python3.7/pygame/pgbufferproxy.h
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
Copyright (C) 2007 Rene Dudfield, Richard Goedeken
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Bufferproxy module C api.
|
||||||
|
Depends on pygame.h being included first.
|
||||||
|
*/
|
||||||
|
#if !defined(PG_BUFPROXY_HEADER)
|
||||||
|
|
||||||
|
#define PYGAMEAPI_BUFPROXY_NUMSLOTS 4
|
||||||
|
#define PYGAMEAPI_BUFPROXY_FIRSTSLOT 0
|
||||||
|
|
||||||
|
#if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || defined(NO_PYGAME_C_API))
|
||||||
|
static void *PgBUFPROXY_C_API[PYGAMEAPI_BUFPROXY_NUMSLOTS];
|
||||||
|
|
||||||
|
typedef PyObject *(*_pgbufproxy_new_t)(PyObject *, getbufferproc);
|
||||||
|
typedef PyObject *(*_pgbufproxy_get_obj_t)(PyObject *);
|
||||||
|
typedef int (*_pgbufproxy_trip_t)(PyObject *);
|
||||||
|
|
||||||
|
#define pgBufproxy_Type (*(PyTypeObject*)PgBUFPROXY_C_API[0])
|
||||||
|
#define pgBufproxy_New (*(_pgbufproxy_new_t)PgBUFPROXY_C_API[1])
|
||||||
|
#define pgBufproxy_GetParent \
|
||||||
|
(*(_pgbufproxy_get_obj_t)PgBUFPROXY_C_API[2])
|
||||||
|
#define pgBufproxy_Trip (*(_pgbufproxy_trip_t)PgBUFPROXY_C_API[3])
|
||||||
|
#define pgBufproxy_Check(x) ((x)->ob_type == (pgBufproxy_Type))
|
||||||
|
#define import_pygame_bufferproxy() \
|
||||||
|
_IMPORT_PYGAME_MODULE(bufferproxy, BUFPROXY, PgBUFPROXY_C_API)
|
||||||
|
|
||||||
|
#endif /* #if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || ... */
|
||||||
|
|
||||||
|
#define PG_BUFPROXY_HEADER
|
||||||
|
|
||||||
|
#endif /* #if !defined(PG_BUFPROXY_HEADER) */
|
195
venv/Include/site/python3.7/pygame/pgcompat.h
Normal file
195
venv/Include/site/python3.7/pygame/pgcompat.h
Normal file
@ -0,0 +1,195 @@
|
|||||||
|
/* Python 2.x/3.x compitibility tools
|
||||||
|
*/
|
||||||
|
|
||||||
|
#if !defined(PGCOMPAT_H)
|
||||||
|
#define PGCOMPAT_H
|
||||||
|
|
||||||
|
#if PY_MAJOR_VERSION >= 3
|
||||||
|
|
||||||
|
#define PY3 1
|
||||||
|
|
||||||
|
/* Define some aliases for the removed PyInt_* functions */
|
||||||
|
#define PyInt_Check(op) PyLong_Check(op)
|
||||||
|
#define PyInt_FromString PyLong_FromString
|
||||||
|
#define PyInt_FromUnicode PyLong_FromUnicode
|
||||||
|
#define PyInt_FromLong PyLong_FromLong
|
||||||
|
#define PyInt_FromSize_t PyLong_FromSize_t
|
||||||
|
#define PyInt_FromSsize_t PyLong_FromSsize_t
|
||||||
|
#define PyInt_AsLong PyLong_AsLong
|
||||||
|
#define PyInt_AsSsize_t PyLong_AsSsize_t
|
||||||
|
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
|
||||||
|
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
|
||||||
|
#define PyInt_AS_LONG PyLong_AS_LONG
|
||||||
|
#define PyNumber_Int PyNumber_Long
|
||||||
|
|
||||||
|
/* Weakrefs flags changed in 3.x */
|
||||||
|
#define Py_TPFLAGS_HAVE_WEAKREFS 0
|
||||||
|
|
||||||
|
/* Module init function returns new module instance. */
|
||||||
|
#define MODINIT_RETURN(x) return x
|
||||||
|
#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC PyInit_##mod_name (void)
|
||||||
|
#define DECREF_MOD(mod) Py_DECREF (mod)
|
||||||
|
|
||||||
|
/* Type header differs. */
|
||||||
|
#define TYPE_HEAD(x,y) PyVarObject_HEAD_INIT(x,y)
|
||||||
|
|
||||||
|
/* Text interface. Use unicode strings. */
|
||||||
|
#define Text_Type PyUnicode_Type
|
||||||
|
#define Text_Check PyUnicode_Check
|
||||||
|
|
||||||
|
#ifndef PYPY_VERSION
|
||||||
|
#define Text_FromLocale(s) PyUnicode_DecodeLocale((s), "strict")
|
||||||
|
#else /* PYPY_VERSION */
|
||||||
|
/* workaround: missing function for pypy */
|
||||||
|
#define Text_FromLocale PyUnicode_FromString
|
||||||
|
#endif /* PYPY_VERSION */
|
||||||
|
|
||||||
|
#define Text_FromUTF8 PyUnicode_FromString
|
||||||
|
#define Text_FromUTF8AndSize PyUnicode_FromStringAndSize
|
||||||
|
#define Text_FromFormat PyUnicode_FromFormat
|
||||||
|
#define Text_GetSize PyUnicode_GetSize
|
||||||
|
#define Text_GET_SIZE PyUnicode_GET_SIZE
|
||||||
|
|
||||||
|
/* Binary interface. Use bytes. */
|
||||||
|
#define Bytes_Type PyBytes_Type
|
||||||
|
#define Bytes_Check PyBytes_Check
|
||||||
|
#define Bytes_Size PyBytes_Size
|
||||||
|
#define Bytes_AsString PyBytes_AsString
|
||||||
|
#define Bytes_AsStringAndSize PyBytes_AsStringAndSize
|
||||||
|
#define Bytes_FromStringAndSize PyBytes_FromStringAndSize
|
||||||
|
#define Bytes_FromFormat PyBytes_FromFormat
|
||||||
|
#define Bytes_AS_STRING PyBytes_AS_STRING
|
||||||
|
#define Bytes_GET_SIZE PyBytes_GET_SIZE
|
||||||
|
#define Bytes_AsDecodeObject PyBytes_AsDecodedObject
|
||||||
|
|
||||||
|
#define Object_Unicode PyObject_Str
|
||||||
|
|
||||||
|
#define IsTextObj(x) (PyUnicode_Check(x) || PyBytes_Check(x))
|
||||||
|
|
||||||
|
/* Renamed builtins */
|
||||||
|
#define BUILTINS_MODULE "builtins"
|
||||||
|
#define BUILTINS_UNICODE "str"
|
||||||
|
#define BUILTINS_UNICHR "chr"
|
||||||
|
|
||||||
|
/* Defaults for unicode file path encoding */
|
||||||
|
#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding
|
||||||
|
#if defined(MS_WIN32)
|
||||||
|
#define UNICODE_DEF_FS_ERROR "replace"
|
||||||
|
#else
|
||||||
|
#define UNICODE_DEF_FS_ERROR "surrogateescape"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#else /* #if PY_MAJOR_VERSION >= 3 */
|
||||||
|
|
||||||
|
#define PY3 0
|
||||||
|
|
||||||
|
/* Module init function returns nothing. */
|
||||||
|
#define MODINIT_RETURN(x) return
|
||||||
|
#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC init##mod_name (void)
|
||||||
|
#define DECREF_MOD(mod)
|
||||||
|
|
||||||
|
/* Type header differs. */
|
||||||
|
#define TYPE_HEAD(x,y) \
|
||||||
|
PyObject_HEAD_INIT(x) \
|
||||||
|
0,
|
||||||
|
|
||||||
|
/* Text interface. Use ascii strings. */
|
||||||
|
#define Text_Type PyString_Type
|
||||||
|
#define Text_Check PyString_Check
|
||||||
|
#define Text_FromLocale PyString_FromString
|
||||||
|
#define Text_FromUTF8 PyString_FromString
|
||||||
|
#define Text_FromUTF8AndSize PyString_FromStringAndSize
|
||||||
|
#define Text_FromFormat PyString_FromFormat
|
||||||
|
#define Text_GetSize PyString_GetSize
|
||||||
|
#define Text_GET_SIZE PyString_GET_SIZE
|
||||||
|
|
||||||
|
/* Binary interface. Use ascii strings. */
|
||||||
|
#define Bytes_Type PyString_Type
|
||||||
|
#define Bytes_Check PyString_Check
|
||||||
|
#define Bytes_Size PyString_Size
|
||||||
|
#define Bytes_AsString PyString_AsString
|
||||||
|
#define Bytes_AsStringAndSize PyString_AsStringAndSize
|
||||||
|
#define Bytes_FromStringAndSize PyString_FromStringAndSize
|
||||||
|
#define Bytes_FromFormat PyString_FromFormat
|
||||||
|
#define Bytes_AS_STRING PyString_AS_STRING
|
||||||
|
#define Bytes_GET_SIZE PyString_GET_SIZE
|
||||||
|
#define Bytes_AsDecodedObject PyString_AsDecodedObject
|
||||||
|
|
||||||
|
#define Object_Unicode PyObject_Unicode
|
||||||
|
|
||||||
|
/* Renamed builtins */
|
||||||
|
#define BUILTINS_MODULE "__builtin__"
|
||||||
|
#define BUILTINS_UNICODE "unicode"
|
||||||
|
#define BUILTINS_UNICHR "unichr"
|
||||||
|
|
||||||
|
/* Defaults for unicode file path encoding */
|
||||||
|
#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding
|
||||||
|
#define UNICODE_DEF_FS_ERROR "strict"
|
||||||
|
|
||||||
|
#endif /* #if PY_MAJOR_VERSION >= 3 */
|
||||||
|
|
||||||
|
#define PY2 (!PY3)
|
||||||
|
|
||||||
|
#define MODINIT_ERROR MODINIT_RETURN (NULL)
|
||||||
|
|
||||||
|
/* Module state. These macros are used to define per-module macros.
|
||||||
|
* v - global state variable (Python 2.x)
|
||||||
|
* s - global state structure (Python 3.x)
|
||||||
|
*/
|
||||||
|
#define PY2_GETSTATE(v) (&(v))
|
||||||
|
#define PY3_GETSTATE(s, m) ((struct s *) PyModule_GetState (m))
|
||||||
|
|
||||||
|
/* Pep 3123: Making PyObject_HEAD conform to standard C */
|
||||||
|
#if !defined(Py_TYPE)
|
||||||
|
#define Py_TYPE(o) (((PyObject *)(o))->ob_type)
|
||||||
|
#define Py_REFCNT(o) (((PyObject *)(o))->ob_refcnt)
|
||||||
|
#define Py_SIZE(o) (((PyVarObject *)(o))->ob_size)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Encode a unicode file path */
|
||||||
|
#define Unicode_AsEncodedPath(u) \
|
||||||
|
PyUnicode_AsEncodedString ((u), UNICODE_DEF_FS_CODEC, UNICODE_DEF_FS_ERROR)
|
||||||
|
|
||||||
|
#define RELATIVE_MODULE(m) ("." m)
|
||||||
|
|
||||||
|
#define HAVE_OLD_BUFPROTO PY2
|
||||||
|
|
||||||
|
#if !defined(PG_ENABLE_OLDBUF) /* allow for command line override */
|
||||||
|
#if HAVE_OLD_BUFPROTO
|
||||||
|
#define PG_ENABLE_OLDBUF 1
|
||||||
|
#else
|
||||||
|
#define PG_ENABLE_OLDBUF 0
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
|
||||||
|
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef Py_TPFLAGS_HAVE_CLASS
|
||||||
|
#define Py_TPFLAGS_HAVE_CLASS 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef Py_TPFLAGS_CHECKTYPES
|
||||||
|
#define Py_TPFLAGS_CHECKTYPES 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX >= 0x03020000
|
||||||
|
#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \
|
||||||
|
PySlice_GetIndicesEx(slice, length, start, stop, step, slicelength)
|
||||||
|
#else
|
||||||
|
#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \
|
||||||
|
PySlice_GetIndicesEx((PySliceObject *)(slice), length, \
|
||||||
|
start, stop, step, slicelength)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Support new buffer protocol? */
|
||||||
|
#if !defined(PG_ENABLE_NEWBUF) /* allow for command line override */
|
||||||
|
#if !defined(PYPY_VERSION)
|
||||||
|
#define PG_ENABLE_NEWBUF 1
|
||||||
|
#else
|
||||||
|
#define PG_ENABLE_NEWBUF 0
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /* #if !defined(PGCOMPAT_H) */
|
16
venv/Include/site/python3.7/pygame/pgopengl.h
Normal file
16
venv/Include/site/python3.7/pygame/pgopengl.h
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
#if !defined(PGOPENGL_H)
|
||||||
|
#define PGOPENGL_H
|
||||||
|
|
||||||
|
/** This header includes definitions of Opengl functions as pointer types for
|
||||||
|
** use with the SDL function SDL_GL_GetProcAddress.
|
||||||
|
**/
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#define GL_APIENTRY __stdcall
|
||||||
|
#else
|
||||||
|
#define GL_APIENTRY
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef void (GL_APIENTRY *GL_glReadPixels_Func)(int, int, int, int, unsigned int, unsigned int, void*);
|
||||||
|
|
||||||
|
#endif
|
34
venv/Include/site/python3.7/pygame/pygame.h
Normal file
34
venv/Include/site/python3.7/pygame/pygame.h
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* To allow the Pygame C api to be globally shared by all code within an
|
||||||
|
* extension module built from multiple C files, only include the pygame.h
|
||||||
|
* header within the top level C file, the one which calls the
|
||||||
|
* 'import_pygame_*' macros. All other C source files of the module should
|
||||||
|
* include _pygame.h instead.
|
||||||
|
*/
|
||||||
|
#ifndef PYGAME_H
|
||||||
|
#define PYGAME_H
|
||||||
|
|
||||||
|
#include "_pygame.h"
|
||||||
|
|
||||||
|
#endif
|
143
venv/Include/site/python3.7/pygame/scrap.h
Normal file
143
venv/Include/site/python3.7/pygame/scrap.h
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2006, 2007 Rene Dudfield, Marcus von Appen
|
||||||
|
|
||||||
|
Originally put in the public domain by Sam Lantinga.
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* This is unconditionally defined in Python.h */
|
||||||
|
#if defined(_POSIX_C_SOURCE)
|
||||||
|
#undef _POSIX_C_SOURCE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <Python.h>
|
||||||
|
|
||||||
|
/* Handle clipboard text and data in arbitrary formats */
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Predefined supported pygame scrap types.
|
||||||
|
*/
|
||||||
|
#define PYGAME_SCRAP_TEXT "text/plain"
|
||||||
|
#define PYGAME_SCRAP_BMP "image/bmp"
|
||||||
|
#define PYGAME_SCRAP_PPM "image/ppm"
|
||||||
|
#define PYGAME_SCRAP_PBM "image/pbm"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The supported scrap clipboard types.
|
||||||
|
*
|
||||||
|
* This is only relevant in a X11 environment, which supports mouse
|
||||||
|
* selections as well. For Win32 and MacOS environments the default
|
||||||
|
* clipboard is used, no matter what value is passed.
|
||||||
|
*/
|
||||||
|
typedef enum
|
||||||
|
{
|
||||||
|
SCRAP_CLIPBOARD,
|
||||||
|
SCRAP_SELECTION /* only supported in X11 environments. */
|
||||||
|
} ScrapClipType;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Macro for initialization checks.
|
||||||
|
*/
|
||||||
|
#define PYGAME_SCRAP_INIT_CHECK() \
|
||||||
|
if(!pygame_scrap_initialized()) \
|
||||||
|
return (PyErr_SetString (pgExc_SDLError, \
|
||||||
|
"scrap system not initialized."), NULL)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Checks, whether the pygame scrap module was initialized.
|
||||||
|
*
|
||||||
|
* \return 1 if the modules was initialized, 0 otherwise.
|
||||||
|
*/
|
||||||
|
extern int
|
||||||
|
pygame_scrap_initialized (void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Initializes the pygame scrap module internals. Call this before any
|
||||||
|
* other method.
|
||||||
|
*
|
||||||
|
* \return 1 on successful initialization, 0 otherwise.
|
||||||
|
*/
|
||||||
|
extern int
|
||||||
|
pygame_scrap_init (void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Checks, whether the pygame window lost the clipboard focus or not.
|
||||||
|
*
|
||||||
|
* \return 1 if the window lost the focus, 0 otherwise.
|
||||||
|
*/
|
||||||
|
extern int
|
||||||
|
pygame_scrap_lost (void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Places content of a specific type into the clipboard.
|
||||||
|
*
|
||||||
|
* \note For X11 the following notes are important: The following types
|
||||||
|
* are reserved for internal usage and thus will throw an error on
|
||||||
|
* setting them: "TIMESTAMP", "TARGETS", "SDL_SELECTION".
|
||||||
|
* Setting PYGAME_SCRAP_TEXT ("text/plain") will also automatically
|
||||||
|
* set the X11 types "STRING" (XA_STRING), "TEXT" and "UTF8_STRING".
|
||||||
|
*
|
||||||
|
* For Win32 the following notes are important: Setting
|
||||||
|
* PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
|
||||||
|
* the Win32 type "TEXT" (CF_TEXT).
|
||||||
|
*
|
||||||
|
* For QNX the following notes are important: Setting
|
||||||
|
* PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
|
||||||
|
* the QNX type "TEXT" (Ph_CL_TEXT).
|
||||||
|
*
|
||||||
|
* \param type The type of the content.
|
||||||
|
* \param srclen The length of the content.
|
||||||
|
* \param src The NULL terminated content.
|
||||||
|
* \return 1, if the content could be successfully pasted into the clipboard,
|
||||||
|
* 0 otherwise.
|
||||||
|
*/
|
||||||
|
extern int
|
||||||
|
pygame_scrap_put (char *type, int srclen, char *src);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Gets the current content from the clipboard.
|
||||||
|
*
|
||||||
|
* \note The received content does not need to be the content previously
|
||||||
|
* placed in the clipboard using pygame_put_scrap(). See the
|
||||||
|
* pygame_put_scrap() notes for more details.
|
||||||
|
*
|
||||||
|
* \param type The type of the content to receive.
|
||||||
|
* \param count The size of the returned content.
|
||||||
|
* \return The content or NULL in case of an error or if no content of the
|
||||||
|
* specified type was available.
|
||||||
|
*/
|
||||||
|
extern char*
|
||||||
|
pygame_scrap_get (char *type, unsigned long *count);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Gets the currently available content types from the clipboard.
|
||||||
|
*
|
||||||
|
* \return The different available content types or NULL in case of an
|
||||||
|
* error or if no content type is available.
|
||||||
|
*/
|
||||||
|
extern char**
|
||||||
|
pygame_scrap_get_types (void);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* \brief Checks whether content for the specified scrap type is currently
|
||||||
|
* available in the clipboard.
|
||||||
|
*
|
||||||
|
* \param type The type to check for.
|
||||||
|
* \return 1, if there is content and 0 otherwise.
|
||||||
|
*/
|
||||||
|
extern int
|
||||||
|
pygame_scrap_contains (char *type);
|
383
venv/Include/site/python3.7/pygame/surface.h
Normal file
383
venv/Include/site/python3.7/pygame/surface.h
Normal file
@ -0,0 +1,383 @@
|
|||||||
|
/*
|
||||||
|
pygame - Python Game Library
|
||||||
|
Copyright (C) 2000-2001 Pete Shinners
|
||||||
|
Copyright (C) 2007 Marcus von Appen
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Library General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Library General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Library General Public
|
||||||
|
License along with this library; if not, write to the Free
|
||||||
|
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||||
|
|
||||||
|
Pete Shinners
|
||||||
|
pete@shinners.org
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef SURFACE_H
|
||||||
|
#define SURFACE_H
|
||||||
|
|
||||||
|
/* This is defined in SDL.h */
|
||||||
|
#if defined(_POSIX_C_SOURCE)
|
||||||
|
#undef _POSIX_C_SOURCE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <SDL.h>
|
||||||
|
#include "pygame.h"
|
||||||
|
|
||||||
|
/* Blend modes */
|
||||||
|
#define PYGAME_BLEND_ADD 0x1
|
||||||
|
#define PYGAME_BLEND_SUB 0x2
|
||||||
|
#define PYGAME_BLEND_MULT 0x3
|
||||||
|
#define PYGAME_BLEND_MIN 0x4
|
||||||
|
#define PYGAME_BLEND_MAX 0x5
|
||||||
|
|
||||||
|
#define PYGAME_BLEND_RGB_ADD 0x1
|
||||||
|
#define PYGAME_BLEND_RGB_SUB 0x2
|
||||||
|
#define PYGAME_BLEND_RGB_MULT 0x3
|
||||||
|
#define PYGAME_BLEND_RGB_MIN 0x4
|
||||||
|
#define PYGAME_BLEND_RGB_MAX 0x5
|
||||||
|
|
||||||
|
#define PYGAME_BLEND_RGBA_ADD 0x6
|
||||||
|
#define PYGAME_BLEND_RGBA_SUB 0x7
|
||||||
|
#define PYGAME_BLEND_RGBA_MULT 0x8
|
||||||
|
#define PYGAME_BLEND_RGBA_MIN 0x9
|
||||||
|
#define PYGAME_BLEND_RGBA_MAX 0x10
|
||||||
|
#define PYGAME_BLEND_PREMULTIPLIED 0x11
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
|
||||||
|
#define GET_PIXEL_24(b) (b[0] + (b[1] << 8) + (b[2] << 16))
|
||||||
|
#else
|
||||||
|
#define GET_PIXEL_24(b) (b[2] + (b[1] << 8) + (b[0] << 16))
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define GET_PIXEL(pxl, bpp, source) \
|
||||||
|
switch (bpp) \
|
||||||
|
{ \
|
||||||
|
case 2: \
|
||||||
|
pxl = *((Uint16 *) (source)); \
|
||||||
|
break; \
|
||||||
|
case 4: \
|
||||||
|
pxl = *((Uint32 *) (source)); \
|
||||||
|
break; \
|
||||||
|
default: \
|
||||||
|
{ \
|
||||||
|
Uint8 *b = (Uint8 *) source; \
|
||||||
|
pxl = GET_PIXEL_24(b); \
|
||||||
|
} \
|
||||||
|
break; \
|
||||||
|
}
|
||||||
|
|
||||||
|
#if IS_SDLv1
|
||||||
|
#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
|
||||||
|
_sR = ((px & fmt->Rmask) >> fmt->Rshift); \
|
||||||
|
_sR = (_sR << fmt->Rloss) + (_sR >> (8 - (fmt->Rloss << 1))); \
|
||||||
|
_sG = ((px & fmt->Gmask) >> fmt->Gshift); \
|
||||||
|
_sG = (_sG << fmt->Gloss) + (_sG >> (8 - (fmt->Gloss << 1))); \
|
||||||
|
_sB = ((px & fmt->Bmask) >> fmt->Bshift); \
|
||||||
|
_sB = (_sB << fmt->Bloss) + (_sB >> (8 - (fmt->Bloss << 1))); \
|
||||||
|
if (ppa) \
|
||||||
|
{ \
|
||||||
|
_sA = ((px & fmt->Amask) >> fmt->Ashift); \
|
||||||
|
_sA = (_sA << fmt->Aloss) + (_sA >> (8 - (fmt->Aloss << 1))); \
|
||||||
|
} \
|
||||||
|
else \
|
||||||
|
{ \
|
||||||
|
_sA = 255; \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
|
||||||
|
sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
|
||||||
|
sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
|
||||||
|
sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
|
||||||
|
sa = 255;
|
||||||
|
|
||||||
|
/* For 1 byte palette pixels */
|
||||||
|
#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
|
||||||
|
*(px) = (Uint8) SDL_MapRGB(fmt, _dR, _dG, _dB)
|
||||||
|
#else /* IS_SDLv2 */
|
||||||
|
#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
|
||||||
|
SDL_GetRGBA(px, fmt, &(_sR), &(_sG), &(_sB), &(_sA)); \
|
||||||
|
if (!ppa) { \
|
||||||
|
_sA = 255; \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
|
||||||
|
sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
|
||||||
|
sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
|
||||||
|
sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
|
||||||
|
sa = 255;
|
||||||
|
|
||||||
|
/* For 1 byte palette pixels */
|
||||||
|
#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
|
||||||
|
*(px) = (Uint8) SDL_MapRGBA(fmt, _dR, _dG, _dB, _dA)
|
||||||
|
#endif /* IS_SDLv2 */
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
|
||||||
|
#define SET_OFFSETS_24(or, og, ob, fmt) \
|
||||||
|
{ \
|
||||||
|
or = (fmt->Rshift == 0 ? 0 : \
|
||||||
|
fmt->Rshift == 8 ? 1 : \
|
||||||
|
2 ); \
|
||||||
|
og = (fmt->Gshift == 0 ? 0 : \
|
||||||
|
fmt->Gshift == 8 ? 1 : \
|
||||||
|
2 ); \
|
||||||
|
ob = (fmt->Bshift == 0 ? 0 : \
|
||||||
|
fmt->Bshift == 8 ? 1 : \
|
||||||
|
2 ); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define SET_OFFSETS_32(or, og, ob, fmt) \
|
||||||
|
{ \
|
||||||
|
or = (fmt->Rshift == 0 ? 0 : \
|
||||||
|
fmt->Rshift == 8 ? 1 : \
|
||||||
|
fmt->Rshift == 16 ? 2 : \
|
||||||
|
3 ); \
|
||||||
|
og = (fmt->Gshift == 0 ? 0 : \
|
||||||
|
fmt->Gshift == 8 ? 1 : \
|
||||||
|
fmt->Gshift == 16 ? 2 : \
|
||||||
|
3 ); \
|
||||||
|
ob = (fmt->Bshift == 0 ? 0 : \
|
||||||
|
fmt->Bshift == 8 ? 1 : \
|
||||||
|
fmt->Bshift == 16 ? 2 : \
|
||||||
|
3 ); \
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
#define SET_OFFSETS_24(or, og, ob, fmt) \
|
||||||
|
{ \
|
||||||
|
or = (fmt->Rshift == 0 ? 2 : \
|
||||||
|
fmt->Rshift == 8 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
og = (fmt->Gshift == 0 ? 2 : \
|
||||||
|
fmt->Gshift == 8 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
ob = (fmt->Bshift == 0 ? 2 : \
|
||||||
|
fmt->Bshift == 8 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define SET_OFFSETS_32(or, og, ob, fmt) \
|
||||||
|
{ \
|
||||||
|
or = (fmt->Rshift == 0 ? 3 : \
|
||||||
|
fmt->Rshift == 8 ? 2 : \
|
||||||
|
fmt->Rshift == 16 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
og = (fmt->Gshift == 0 ? 3 : \
|
||||||
|
fmt->Gshift == 8 ? 2 : \
|
||||||
|
fmt->Gshift == 16 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
ob = (fmt->Bshift == 0 ? 3 : \
|
||||||
|
fmt->Bshift == 8 ? 2 : \
|
||||||
|
fmt->Bshift == 16 ? 1 : \
|
||||||
|
0 ); \
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
#define CREATE_PIXEL(buf, r, g, b, a, bp, ft) \
|
||||||
|
switch (bp) \
|
||||||
|
{ \
|
||||||
|
case 2: \
|
||||||
|
*((Uint16 *) (buf)) = \
|
||||||
|
((r >> ft->Rloss) << ft->Rshift) | \
|
||||||
|
((g >> ft->Gloss) << ft->Gshift) | \
|
||||||
|
((b >> ft->Bloss) << ft->Bshift) | \
|
||||||
|
((a >> ft->Aloss) << ft->Ashift); \
|
||||||
|
break; \
|
||||||
|
case 4: \
|
||||||
|
*((Uint32 *) (buf)) = \
|
||||||
|
((r >> ft->Rloss) << ft->Rshift) | \
|
||||||
|
((g >> ft->Gloss) << ft->Gshift) | \
|
||||||
|
((b >> ft->Bloss) << ft->Bshift) | \
|
||||||
|
((a >> ft->Aloss) << ft->Ashift); \
|
||||||
|
break; \
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Pretty good idea from Tom Duff :-). */
|
||||||
|
#define LOOP_UNROLLED4(code, n, width) \
|
||||||
|
n = (width + 3) / 4; \
|
||||||
|
switch (width & 3) \
|
||||||
|
{ \
|
||||||
|
case 0: do { code; \
|
||||||
|
case 3: code; \
|
||||||
|
case 2: code; \
|
||||||
|
case 1: code; \
|
||||||
|
} while (--n > 0); \
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Used in the srcbpp == dstbpp == 1 blend functions */
|
||||||
|
#define REPEAT_3(code) \
|
||||||
|
code; \
|
||||||
|
code; \
|
||||||
|
code;
|
||||||
|
|
||||||
|
#define REPEAT_4(code) \
|
||||||
|
code; \
|
||||||
|
code; \
|
||||||
|
code; \
|
||||||
|
code;
|
||||||
|
|
||||||
|
|
||||||
|
#define BLEND_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \
|
||||||
|
tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \
|
||||||
|
tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255);
|
||||||
|
|
||||||
|
#define BLEND_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \
|
||||||
|
tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \
|
||||||
|
tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0);
|
||||||
|
|
||||||
|
#define BLEND_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
dR = (dR && sR) ? (dR * sR) >> 8 : 0; \
|
||||||
|
dG = (dG && sG) ? (dG * sG) >> 8 : 0; \
|
||||||
|
dB = (dB && sB) ? (dB * sB) >> 8 : 0;
|
||||||
|
|
||||||
|
#define BLEND_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
if(sR < dR) { dR = sR; } \
|
||||||
|
if(sG < dG) { dG = sG; } \
|
||||||
|
if(sB < dB) { dB = sB; }
|
||||||
|
|
||||||
|
#define BLEND_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
if(sR > dR) { dR = sR; } \
|
||||||
|
if(sG > dG) { dG = sG; } \
|
||||||
|
if(sB > dB) { dB = sB; }
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#define BLEND_RGBA_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \
|
||||||
|
tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \
|
||||||
|
tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); \
|
||||||
|
tmp = dA + sA; dA = (tmp <= 255 ? tmp : 255);
|
||||||
|
|
||||||
|
#define BLEND_RGBA_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \
|
||||||
|
tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \
|
||||||
|
tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); \
|
||||||
|
tmp = dA - sA; dA = (tmp >= 0 ? tmp : 0);
|
||||||
|
|
||||||
|
#define BLEND_RGBA_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
dR = (dR && sR) ? (dR * sR) >> 8 : 0; \
|
||||||
|
dG = (dG && sG) ? (dG * sG) >> 8 : 0; \
|
||||||
|
dB = (dB && sB) ? (dB * sB) >> 8 : 0; \
|
||||||
|
dA = (dA && sA) ? (dA * sA) >> 8 : 0;
|
||||||
|
|
||||||
|
#define BLEND_RGBA_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
if(sR < dR) { dR = sR; } \
|
||||||
|
if(sG < dG) { dG = sG; } \
|
||||||
|
if(sB < dB) { dB = sB; } \
|
||||||
|
if(sA < dA) { dA = sA; }
|
||||||
|
|
||||||
|
#define BLEND_RGBA_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
if(sR > dR) { dR = sR; } \
|
||||||
|
if(sG > dG) { dG = sG; } \
|
||||||
|
if(sB > dB) { dB = sB; } \
|
||||||
|
if(sA > dA) { dA = sA; }
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#if 1
|
||||||
|
/* Choose an alpha blend equation. If the sign is preserved on a right shift
|
||||||
|
* then use a specialized, faster, equation. Otherwise a more general form,
|
||||||
|
* where all additions are done before the shift, is needed.
|
||||||
|
*/
|
||||||
|
#if (-1 >> 1) < 0
|
||||||
|
#define ALPHA_BLEND_COMP(sC, dC, sA) ((((sC - dC) * sA + sC) >> 8) + dC)
|
||||||
|
#else
|
||||||
|
#define ALPHA_BLEND_COMP(sC, dC, sA) (((dC << 8) + (sC - dC) * sA + sC) >> 8)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
do { \
|
||||||
|
if (dA) \
|
||||||
|
{ \
|
||||||
|
dR = ALPHA_BLEND_COMP(sR, dR, sA); \
|
||||||
|
dG = ALPHA_BLEND_COMP(sG, dG, sA); \
|
||||||
|
dB = ALPHA_BLEND_COMP(sB, dB, sA); \
|
||||||
|
dA = sA + dA - ((sA * dA) / 255); \
|
||||||
|
} \
|
||||||
|
else \
|
||||||
|
{ \
|
||||||
|
dR = sR; \
|
||||||
|
dG = sG; \
|
||||||
|
dB = sB; \
|
||||||
|
dA = sA; \
|
||||||
|
} \
|
||||||
|
} while(0)
|
||||||
|
|
||||||
|
#define ALPHA_BLEND_PREMULTIPLIED_COMP(sC, dC, sA) (sC + dC - ((dC * sA) >> 8))
|
||||||
|
|
||||||
|
#define ALPHA_BLEND_PREMULTIPLIED(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
do { \
|
||||||
|
tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sR, dR, sA); dR = (tmp > 255 ? 255 : tmp); \
|
||||||
|
tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sG, dG, sA); dG = (tmp > 255 ? 255 : tmp); \
|
||||||
|
tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sB, dB, sA); dB = (tmp > 255 ? 255 : tmp); \
|
||||||
|
dA = sA + dA - ((sA * dA) / 255); \
|
||||||
|
} while(0)
|
||||||
|
#elif 0
|
||||||
|
|
||||||
|
#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
|
||||||
|
do { \
|
||||||
|
if(sA){ \
|
||||||
|
if(dA && sA < 255){ \
|
||||||
|
int dContrib = dA*(255 - sA)/255; \
|
||||||
|
dA = sA+dA - ((sA*dA)/255); \
|
||||||
|
dR = (dR*dContrib + sR*sA)/dA; \
|
||||||
|
dG = (dG*dContrib + sG*sA)/dA; \
|
||||||
|
dB = (dB*dContrib + sB*sA)/dA; \
|
||||||
|
}else{ \
|
||||||
|
dR = sR; \
|
||||||
|
dG = sG; \
|
||||||
|
dB = sB; \
|
||||||
|
dA = sA; \
|
||||||
|
} \
|
||||||
|
} \
|
||||||
|
} while(0)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
int
|
||||||
|
surface_fill_blend (SDL_Surface *surface, SDL_Rect *rect, Uint32 color,
|
||||||
|
int blendargs);
|
||||||
|
|
||||||
|
void
|
||||||
|
surface_respect_clip_rect (SDL_Surface *surface, SDL_Rect *rect);
|
||||||
|
|
||||||
|
int
|
||||||
|
pygame_AlphaBlit (SDL_Surface * src, SDL_Rect * srcrect,
|
||||||
|
SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
|
||||||
|
|
||||||
|
int
|
||||||
|
pygame_Blit (SDL_Surface * src, SDL_Rect * srcrect,
|
||||||
|
SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
|
||||||
|
|
||||||
|
#endif /* SURFACE_H */
|
587
venv/Lib/site-packages/cv2/LICENSE-3RD-PARTY.txt
Normal file
587
venv/Lib/site-packages/cv2/LICENSE-3RD-PARTY.txt
Normal file
@ -0,0 +1,587 @@
|
|||||||
|
OpenCV library is redistributed within opencv-python package.
|
||||||
|
This license applies to OpenCV binary in the directory cv2/.
|
||||||
|
|
||||||
|
By downloading, copying, installing or using the software you agree to this license.
|
||||||
|
If you do not agree to this license, do not download, install,
|
||||||
|
copy or use the software.
|
||||||
|
|
||||||
|
|
||||||
|
License Agreement
|
||||||
|
For Open Source Computer Vision Library
|
||||||
|
(3-clause BSD License)
|
||||||
|
|
||||||
|
Copyright (C) 2000-2018, Intel Corporation, all rights reserved.
|
||||||
|
Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
|
||||||
|
Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
|
||||||
|
Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
|
||||||
|
Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
|
||||||
|
Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
|
||||||
|
Third party copyrights are property of their respective owners.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
* Neither the names of the copyright holders nor the names of the contributors
|
||||||
|
may be used to endorse or promote products derived from this software
|
||||||
|
without specific prior written permission.
|
||||||
|
|
||||||
|
This software is provided by the copyright holders and contributors "as is" and
|
||||||
|
any express or implied warranties, including, but not limited to, the implied
|
||||||
|
warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||||
|
In no event shall copyright holders or contributors be liable for any direct,
|
||||||
|
indirect, incidental, special, exemplary, or consequential damages
|
||||||
|
(including, but not limited to, procurement of substitute goods or services;
|
||||||
|
loss of use, data, or profits; or business interruption) however caused
|
||||||
|
and on any theory of liability, whether in contract, strict liability,
|
||||||
|
or tort (including negligence or otherwise) arising in any way out of
|
||||||
|
the use of this software, even if advised of the possibility of such damage.
|
||||||
|
|
||||||
|
------------------------------------------------------------------------------
|
||||||
|
libvpx is redistributed within opencv-python Linux packages.
|
||||||
|
This license applies to libvpx binary in the directory cv2/.
|
||||||
|
|
||||||
|
Copyright (c) 2010, The WebM Project authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
* Neither the name of Google, nor the WebM Project, nor the names
|
||||||
|
of its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written
|
||||||
|
permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
------------------------------------------------------------------------------
|
||||||
|
FFmpeg is redistributed within opencv-python Windows packages.
|
||||||
|
Qt 4.8.7 is redistributed within opencv-python Linux and macOS packages.
|
||||||
|
This license applies to FFmpeg and Qt binaries in the directory cv2/.
|
||||||
|
|
||||||
|
GNU LESSER GENERAL PUBLIC LICENSE
|
||||||
|
Version 2.1, February 1999
|
||||||
|
|
||||||
|
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
[This is the first released version of the Lesser GPL. It also counts
|
||||||
|
as the successor of the GNU Library Public License, version 2, hence
|
||||||
|
the version number 2.1.]
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The licenses for most software are designed to take away your
|
||||||
|
freedom to share and change it. By contrast, the GNU General Public
|
||||||
|
Licenses are intended to guarantee your freedom to share and change
|
||||||
|
free software--to make sure the software is free for all its users.
|
||||||
|
|
||||||
|
This license, the Lesser General Public License, applies to some
|
||||||
|
specially designated software packages--typically libraries--of the
|
||||||
|
Free Software Foundation and other authors who decide to use it. You
|
||||||
|
can use it too, but we suggest you first think carefully about whether
|
||||||
|
this license or the ordinary General Public License is the better
|
||||||
|
strategy to use in any particular case, based on the explanations below.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom of use,
|
||||||
|
not price. Our General Public Licenses are designed to make sure that
|
||||||
|
you have the freedom to distribute copies of free software (and charge
|
||||||
|
for this service if you wish); that you receive source code or can get
|
||||||
|
it if you want it; that you can change the software and use pieces of
|
||||||
|
it in new free programs; and that you are informed that you can do
|
||||||
|
these things.
|
||||||
|
|
||||||
|
To protect your rights, we need to make restrictions that forbid
|
||||||
|
distributors to deny you these rights or to ask you to surrender these
|
||||||
|
rights. These restrictions translate to certain responsibilities for
|
||||||
|
you if you distribute copies of the library or if you modify it.
|
||||||
|
|
||||||
|
For example, if you distribute copies of the library, whether gratis
|
||||||
|
or for a fee, you must give the recipients all the rights that we gave
|
||||||
|
you. You must make sure that they, too, receive or can get the source
|
||||||
|
code. If you link other code with the library, you must provide
|
||||||
|
complete object files to the recipients, so that they can relink them
|
||||||
|
with the library after making changes to the library and recompiling
|
||||||
|
it. And you must show them these terms so they know their rights.
|
||||||
|
|
||||||
|
We protect your rights with a two-step method: (1) we copyright the
|
||||||
|
library, and (2) we offer you this license, which gives you legal
|
||||||
|
permission to copy, distribute and/or modify the library.
|
||||||
|
|
||||||
|
To protect each distributor, we want to make it very clear that
|
||||||
|
there is no warranty for the free library. Also, if the library is
|
||||||
|
modified by someone else and passed on, the recipients should know
|
||||||
|
that what they have is not the original version, so that the original
|
||||||
|
author's reputation will not be affected by problems that might be
|
||||||
|
introduced by others.
|
||||||
|
|
||||||
|
Finally, software patents pose a constant threat to the existence of
|
||||||
|
any free program. We wish to make sure that a company cannot
|
||||||
|
effectively restrict the users of a free program by obtaining a
|
||||||
|
restrictive license from a patent holder. Therefore, we insist that
|
||||||
|
any patent license obtained for a version of the library must be
|
||||||
|
consistent with the full freedom of use specified in this license.
|
||||||
|
|
||||||
|
Most GNU software, including some libraries, is covered by the
|
||||||
|
ordinary GNU General Public License. This license, the GNU Lesser
|
||||||
|
General Public License, applies to certain designated libraries, and
|
||||||
|
is quite different from the ordinary General Public License. We use
|
||||||
|
this license for certain libraries in order to permit linking those
|
||||||
|
libraries into non-free programs.
|
||||||
|
|
||||||
|
When a program is linked with a library, whether statically or using
|
||||||
|
a shared library, the combination of the two is legally speaking a
|
||||||
|
combined work, a derivative of the original library. The ordinary
|
||||||
|
General Public License therefore permits such linking only if the
|
||||||
|
entire combination fits its criteria of freedom. The Lesser General
|
||||||
|
Public License permits more lax criteria for linking other code with
|
||||||
|
the library.
|
||||||
|
|
||||||
|
We call this license the "Lesser" General Public License because it
|
||||||
|
does Less to protect the user's freedom than the ordinary General
|
||||||
|
Public License. It also provides other free software developers Less
|
||||||
|
of an advantage over competing non-free programs. These disadvantages
|
||||||
|
are the reason we use the ordinary General Public License for many
|
||||||
|
libraries. However, the Lesser license provides advantages in certain
|
||||||
|
special circumstances.
|
||||||
|
|
||||||
|
For example, on rare occasions, there may be a special need to
|
||||||
|
encourage the widest possible use of a certain library, so that it becomes
|
||||||
|
a de-facto standard. To achieve this, non-free programs must be
|
||||||
|
allowed to use the library. A more frequent case is that a free
|
||||||
|
library does the same job as widely used non-free libraries. In this
|
||||||
|
case, there is little to gain by limiting the free library to free
|
||||||
|
software only, so we use the Lesser General Public License.
|
||||||
|
|
||||||
|
In other cases, permission to use a particular library in non-free
|
||||||
|
programs enables a greater number of people to use a large body of
|
||||||
|
free software. For example, permission to use the GNU C Library in
|
||||||
|
non-free programs enables many more people to use the whole GNU
|
||||||
|
operating system, as well as its variant, the GNU/Linux operating
|
||||||
|
system.
|
||||||
|
|
||||||
|
Although the Lesser General Public License is Less protective of the
|
||||||
|
users' freedom, it does ensure that the user of a program that is
|
||||||
|
linked with the Library has the freedom and the wherewithal to run
|
||||||
|
that program using a modified version of the Library.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and
|
||||||
|
modification follow. Pay close attention to the difference between a
|
||||||
|
"work based on the library" and a "work that uses the library". The
|
||||||
|
former contains code derived from the library, whereas the latter must
|
||||||
|
be combined with the library in order to run.
|
||||||
|
|
||||||
|
GNU LESSER GENERAL PUBLIC LICENSE
|
||||||
|
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||||
|
|
||||||
|
0. This License Agreement applies to any software library or other
|
||||||
|
program which contains a notice placed by the copyright holder or
|
||||||
|
other authorized party saying it may be distributed under the terms of
|
||||||
|
this Lesser General Public License (also called "this License").
|
||||||
|
Each licensee is addressed as "you".
|
||||||
|
|
||||||
|
A "library" means a collection of software functions and/or data
|
||||||
|
prepared so as to be conveniently linked with application programs
|
||||||
|
(which use some of those functions and data) to form executables.
|
||||||
|
|
||||||
|
The "Library", below, refers to any such software library or work
|
||||||
|
which has been distributed under these terms. A "work based on the
|
||||||
|
Library" means either the Library or any derivative work under
|
||||||
|
copyright law: that is to say, a work containing the Library or a
|
||||||
|
portion of it, either verbatim or with modifications and/or translated
|
||||||
|
straightforwardly into another language. (Hereinafter, translation is
|
||||||
|
included without limitation in the term "modification".)
|
||||||
|
|
||||||
|
"Source code" for a work means the preferred form of the work for
|
||||||
|
making modifications to it. For a library, complete source code means
|
||||||
|
all the source code for all modules it contains, plus any associated
|
||||||
|
interface definition files, plus the scripts used to control compilation
|
||||||
|
and installation of the library.
|
||||||
|
|
||||||
|
Activities other than copying, distribution and modification are not
|
||||||
|
covered by this License; they are outside its scope. The act of
|
||||||
|
running a program using the Library is not restricted, and output from
|
||||||
|
such a program is covered only if its contents constitute a work based
|
||||||
|
on the Library (independent of the use of the Library in a tool for
|
||||||
|
writing it). Whether that is true depends on what the Library does
|
||||||
|
and what the program that uses the Library does.
|
||||||
|
|
||||||
|
1. You may copy and distribute verbatim copies of the Library's
|
||||||
|
complete source code as you receive it, in any medium, provided that
|
||||||
|
you conspicuously and appropriately publish on each copy an
|
||||||
|
appropriate copyright notice and disclaimer of warranty; keep intact
|
||||||
|
all the notices that refer to this License and to the absence of any
|
||||||
|
warranty; and distribute a copy of this License along with the
|
||||||
|
Library.
|
||||||
|
|
||||||
|
You may charge a fee for the physical act of transferring a copy,
|
||||||
|
and you may at your option offer warranty protection in exchange for a
|
||||||
|
fee.
|
||||||
|
|
||||||
|
2. You may modify your copy or copies of the Library or any portion
|
||||||
|
of it, thus forming a work based on the Library, and copy and
|
||||||
|
distribute such modifications or work under the terms of Section 1
|
||||||
|
above, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The modified work must itself be a software library.
|
||||||
|
|
||||||
|
b) You must cause the files modified to carry prominent notices
|
||||||
|
stating that you changed the files and the date of any change.
|
||||||
|
|
||||||
|
c) You must cause the whole of the work to be licensed at no
|
||||||
|
charge to all third parties under the terms of this License.
|
||||||
|
|
||||||
|
d) If a facility in the modified Library refers to a function or a
|
||||||
|
table of data to be supplied by an application program that uses
|
||||||
|
the facility, other than as an argument passed when the facility
|
||||||
|
is invoked, then you must make a good faith effort to ensure that,
|
||||||
|
in the event an application does not supply such function or
|
||||||
|
table, the facility still operates, and performs whatever part of
|
||||||
|
its purpose remains meaningful.
|
||||||
|
|
||||||
|
(For example, a function in a library to compute square roots has
|
||||||
|
a purpose that is entirely well-defined independent of the
|
||||||
|
application. Therefore, Subsection 2d requires that any
|
||||||
|
application-supplied function or table used by this function must
|
||||||
|
be optional: if the application does not supply it, the square
|
||||||
|
root function must still compute square roots.)
|
||||||
|
|
||||||
|
These requirements apply to the modified work as a whole. If
|
||||||
|
identifiable sections of that work are not derived from the Library,
|
||||||
|
and can be reasonably considered independent and separate works in
|
||||||
|
themselves, then this License, and its terms, do not apply to those
|
||||||
|
sections when you distribute them as separate works. But when you
|
||||||
|
distribute the same sections as part of a whole which is a work based
|
||||||
|
on the Library, the distribution of the whole must be on the terms of
|
||||||
|
this License, whose permissions for other licensees extend to the
|
||||||
|
entire whole, and thus to each and every part regardless of who wrote
|
||||||
|
it.
|
||||||
|
|
||||||
|
Thus, it is not the intent of this section to claim rights or contest
|
||||||
|
your rights to work written entirely by you; rather, the intent is to
|
||||||
|
exercise the right to control the distribution of derivative or
|
||||||
|
collective works based on the Library.
|
||||||
|
|
||||||
|
In addition, mere aggregation of another work not based on the Library
|
||||||
|
with the Library (or with a work based on the Library) on a volume of
|
||||||
|
a storage or distribution medium does not bring the other work under
|
||||||
|
the scope of this License.
|
||||||
|
|
||||||
|
3. You may opt to apply the terms of the ordinary GNU General Public
|
||||||
|
License instead of this License to a given copy of the Library. To do
|
||||||
|
this, you must alter all the notices that refer to this License, so
|
||||||
|
that they refer to the ordinary GNU General Public License, version 2,
|
||||||
|
instead of to this License. (If a newer version than version 2 of the
|
||||||
|
ordinary GNU General Public License has appeared, then you can specify
|
||||||
|
that version instead if you wish.) Do not make any other change in
|
||||||
|
these notices.
|
||||||
|
|
||||||
|
Once this change is made in a given copy, it is irreversible for
|
||||||
|
that copy, so the ordinary GNU General Public License applies to all
|
||||||
|
subsequent copies and derivative works made from that copy.
|
||||||
|
|
||||||
|
This option is useful when you wish to copy part of the code of
|
||||||
|
the Library into a program that is not a library.
|
||||||
|
|
||||||
|
4. You may copy and distribute the Library (or a portion or
|
||||||
|
derivative of it, under Section 2) in object code or executable form
|
||||||
|
under the terms of Sections 1 and 2 above provided that you accompany
|
||||||
|
it with the complete corresponding machine-readable source code, which
|
||||||
|
must be distributed under the terms of Sections 1 and 2 above on a
|
||||||
|
medium customarily used for software interchange.
|
||||||
|
|
||||||
|
If distribution of object code is made by offering access to copy
|
||||||
|
from a designated place, then offering equivalent access to copy the
|
||||||
|
source code from the same place satisfies the requirement to
|
||||||
|
distribute the source code, even though third parties are not
|
||||||
|
compelled to copy the source along with the object code.
|
||||||
|
|
||||||
|
5. A program that contains no derivative of any portion of the
|
||||||
|
Library, but is designed to work with the Library by being compiled or
|
||||||
|
linked with it, is called a "work that uses the Library". Such a
|
||||||
|
work, in isolation, is not a derivative work of the Library, and
|
||||||
|
therefore falls outside the scope of this License.
|
||||||
|
|
||||||
|
However, linking a "work that uses the Library" with the Library
|
||||||
|
creates an executable that is a derivative of the Library (because it
|
||||||
|
contains portions of the Library), rather than a "work that uses the
|
||||||
|
library". The executable is therefore covered by this License.
|
||||||
|
Section 6 states terms for distribution of such executables.
|
||||||
|
|
||||||
|
When a "work that uses the Library" uses material from a header file
|
||||||
|
that is part of the Library, the object code for the work may be a
|
||||||
|
derivative work of the Library even though the source code is not.
|
||||||
|
Whether this is true is especially significant if the work can be
|
||||||
|
linked without the Library, or if the work is itself a library. The
|
||||||
|
threshold for this to be true is not precisely defined by law.
|
||||||
|
|
||||||
|
If such an object file uses only numerical parameters, data
|
||||||
|
structure layouts and accessors, and small macros and small inline
|
||||||
|
functions (ten lines or less in length), then the use of the object
|
||||||
|
file is unrestricted, regardless of whether it is legally a derivative
|
||||||
|
work. (Executables containing this object code plus portions of the
|
||||||
|
Library will still fall under Section 6.)
|
||||||
|
|
||||||
|
Otherwise, if the work is a derivative of the Library, you may
|
||||||
|
distribute the object code for the work under the terms of Section 6.
|
||||||
|
Any executables containing that work also fall under Section 6,
|
||||||
|
whether or not they are linked directly with the Library itself.
|
||||||
|
|
||||||
|
6. As an exception to the Sections above, you may also combine or
|
||||||
|
link a "work that uses the Library" with the Library to produce a
|
||||||
|
work containing portions of the Library, and distribute that work
|
||||||
|
under terms of your choice, provided that the terms permit
|
||||||
|
modification of the work for the customer's own use and reverse
|
||||||
|
engineering for debugging such modifications.
|
||||||
|
|
||||||
|
You must give prominent notice with each copy of the work that the
|
||||||
|
Library is used in it and that the Library and its use are covered by
|
||||||
|
this License. You must supply a copy of this License. If the work
|
||||||
|
during execution displays copyright notices, you must include the
|
||||||
|
copyright notice for the Library among them, as well as a reference
|
||||||
|
directing the user to the copy of this License. Also, you must do one
|
||||||
|
of these things:
|
||||||
|
|
||||||
|
a) Accompany the work with the complete corresponding
|
||||||
|
machine-readable source code for the Library including whatever
|
||||||
|
changes were used in the work (which must be distributed under
|
||||||
|
Sections 1 and 2 above); and, if the work is an executable linked
|
||||||
|
with the Library, with the complete machine-readable "work that
|
||||||
|
uses the Library", as object code and/or source code, so that the
|
||||||
|
user can modify the Library and then relink to produce a modified
|
||||||
|
executable containing the modified Library. (It is understood
|
||||||
|
that the user who changes the contents of definitions files in the
|
||||||
|
Library will not necessarily be able to recompile the application
|
||||||
|
to use the modified definitions.)
|
||||||
|
|
||||||
|
b) Use a suitable shared library mechanism for linking with the
|
||||||
|
Library. A suitable mechanism is one that (1) uses at run time a
|
||||||
|
copy of the library already present on the user's computer system,
|
||||||
|
rather than copying library functions into the executable, and (2)
|
||||||
|
will operate properly with a modified version of the library, if
|
||||||
|
the user installs one, as long as the modified version is
|
||||||
|
interface-compatible with the version that the work was made with.
|
||||||
|
|
||||||
|
c) Accompany the work with a written offer, valid for at
|
||||||
|
least three years, to give the same user the materials
|
||||||
|
specified in Subsection 6a, above, for a charge no more
|
||||||
|
than the cost of performing this distribution.
|
||||||
|
|
||||||
|
d) If distribution of the work is made by offering access to copy
|
||||||
|
from a designated place, offer equivalent access to copy the above
|
||||||
|
specified materials from the same place.
|
||||||
|
|
||||||
|
e) Verify that the user has already received a copy of these
|
||||||
|
materials or that you have already sent this user a copy.
|
||||||
|
|
||||||
|
For an executable, the required form of the "work that uses the
|
||||||
|
Library" must include any data and utility programs needed for
|
||||||
|
reproducing the executable from it. However, as a special exception,
|
||||||
|
the materials to be distributed need not include anything that is
|
||||||
|
normally distributed (in either source or binary form) with the major
|
||||||
|
components (compiler, kernel, and so on) of the operating system on
|
||||||
|
which the executable runs, unless that component itself accompanies
|
||||||
|
the executable.
|
||||||
|
|
||||||
|
It may happen that this requirement contradicts the license
|
||||||
|
restrictions of other proprietary libraries that do not normally
|
||||||
|
accompany the operating system. Such a contradiction means you cannot
|
||||||
|
use both them and the Library together in an executable that you
|
||||||
|
distribute.
|
||||||
|
|
||||||
|
7. You may place library facilities that are a work based on the
|
||||||
|
Library side-by-side in a single library together with other library
|
||||||
|
facilities not covered by this License, and distribute such a combined
|
||||||
|
library, provided that the separate distribution of the work based on
|
||||||
|
the Library and of the other library facilities is otherwise
|
||||||
|
permitted, and provided that you do these two things:
|
||||||
|
|
||||||
|
a) Accompany the combined library with a copy of the same work
|
||||||
|
based on the Library, uncombined with any other library
|
||||||
|
facilities. This must be distributed under the terms of the
|
||||||
|
Sections above.
|
||||||
|
|
||||||
|
b) Give prominent notice with the combined library of the fact
|
||||||
|
that part of it is a work based on the Library, and explaining
|
||||||
|
where to find the accompanying uncombined form of the same work.
|
||||||
|
|
||||||
|
8. You may not copy, modify, sublicense, link with, or distribute
|
||||||
|
the Library except as expressly provided under this License. Any
|
||||||
|
attempt otherwise to copy, modify, sublicense, link with, or
|
||||||
|
distribute the Library is void, and will automatically terminate your
|
||||||
|
rights under this License. However, parties who have received copies,
|
||||||
|
or rights, from you under this License will not have their licenses
|
||||||
|
terminated so long as such parties remain in full compliance.
|
||||||
|
|
||||||
|
9. You are not required to accept this License, since you have not
|
||||||
|
signed it. However, nothing else grants you permission to modify or
|
||||||
|
distribute the Library or its derivative works. These actions are
|
||||||
|
prohibited by law if you do not accept this License. Therefore, by
|
||||||
|
modifying or distributing the Library (or any work based on the
|
||||||
|
Library), you indicate your acceptance of this License to do so, and
|
||||||
|
all its terms and conditions for copying, distributing or modifying
|
||||||
|
the Library or works based on it.
|
||||||
|
|
||||||
|
10. Each time you redistribute the Library (or any work based on the
|
||||||
|
Library), the recipient automatically receives a license from the
|
||||||
|
original licensor to copy, distribute, link with or modify the Library
|
||||||
|
subject to these terms and conditions. You may not impose any further
|
||||||
|
restrictions on the recipients' exercise of the rights granted herein.
|
||||||
|
You are not responsible for enforcing compliance by third parties with
|
||||||
|
this License.
|
||||||
|
|
||||||
|
11. If, as a consequence of a court judgment or allegation of patent
|
||||||
|
infringement or for any other reason (not limited to patent issues),
|
||||||
|
conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot
|
||||||
|
distribute so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you
|
||||||
|
may not distribute the Library at all. For example, if a patent
|
||||||
|
license would not permit royalty-free redistribution of the Library by
|
||||||
|
all those who receive copies directly or indirectly through you, then
|
||||||
|
the only way you could satisfy both it and this License would be to
|
||||||
|
refrain entirely from distribution of the Library.
|
||||||
|
|
||||||
|
If any portion of this section is held invalid or unenforceable under any
|
||||||
|
particular circumstance, the balance of the section is intended to apply,
|
||||||
|
and the section as a whole is intended to apply in other circumstances.
|
||||||
|
|
||||||
|
It is not the purpose of this section to induce you to infringe any
|
||||||
|
patents or other property right claims or to contest validity of any
|
||||||
|
such claims; this section has the sole purpose of protecting the
|
||||||
|
integrity of the free software distribution system which is
|
||||||
|
implemented by public license practices. Many people have made
|
||||||
|
generous contributions to the wide range of software distributed
|
||||||
|
through that system in reliance on consistent application of that
|
||||||
|
system; it is up to the author/donor to decide if he or she is willing
|
||||||
|
to distribute software through any other system and a licensee cannot
|
||||||
|
impose that choice.
|
||||||
|
|
||||||
|
This section is intended to make thoroughly clear what is believed to
|
||||||
|
be a consequence of the rest of this License.
|
||||||
|
|
||||||
|
12. If the distribution and/or use of the Library is restricted in
|
||||||
|
certain countries either by patents or by copyrighted interfaces, the
|
||||||
|
original copyright holder who places the Library under this License may add
|
||||||
|
an explicit geographical distribution limitation excluding those countries,
|
||||||
|
so that distribution is permitted only in or among countries not thus
|
||||||
|
excluded. In such case, this License incorporates the limitation as if
|
||||||
|
written in the body of this License.
|
||||||
|
|
||||||
|
13. The Free Software Foundation may publish revised and/or new
|
||||||
|
versions of the Lesser General Public License from time to time.
|
||||||
|
Such new versions will be similar in spirit to the present version,
|
||||||
|
but may differ in detail to address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the Library
|
||||||
|
specifies a version number of this License which applies to it and
|
||||||
|
"any later version", you have the option of following the terms and
|
||||||
|
conditions either of that version or of any later version published by
|
||||||
|
the Free Software Foundation. If the Library does not specify a
|
||||||
|
license version number, you may choose any version ever published by
|
||||||
|
the Free Software Foundation.
|
||||||
|
|
||||||
|
14. If you wish to incorporate parts of the Library into other free
|
||||||
|
programs whose distribution conditions are incompatible with these,
|
||||||
|
write to the author to ask for permission. For software which is
|
||||||
|
copyrighted by the Free Software Foundation, write to the Free
|
||||||
|
Software Foundation; we sometimes make exceptions for this. Our
|
||||||
|
decision will be guided by the two goals of preserving the free status
|
||||||
|
of all derivatives of our free software and of promoting the sharing
|
||||||
|
and reuse of software generally.
|
||||||
|
|
||||||
|
NO WARRANTY
|
||||||
|
|
||||||
|
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
|
||||||
|
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
||||||
|
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
|
||||||
|
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
|
||||||
|
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
|
||||||
|
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
|
||||||
|
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
|
||||||
|
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
|
||||||
|
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
|
||||||
|
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
|
||||||
|
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
|
||||||
|
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
|
||||||
|
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
|
||||||
|
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
|
||||||
|
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||||
|
DAMAGES.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Libraries
|
||||||
|
|
||||||
|
If you develop a new library, and you want it to be of the greatest
|
||||||
|
possible use to the public, we recommend making it free software that
|
||||||
|
everyone can redistribute and change. You can do so by permitting
|
||||||
|
redistribution under these terms (or, alternatively, under the terms of the
|
||||||
|
ordinary General Public License).
|
||||||
|
|
||||||
|
To apply these terms, attach the following notices to the library. It is
|
||||||
|
safest to attach them to the start of each source file to most effectively
|
||||||
|
convey the exclusion of warranty; and each file should have at least the
|
||||||
|
"copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the library's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This library is free software; you can redistribute it and/or
|
||||||
|
modify it under the terms of the GNU Lesser General Public
|
||||||
|
License as published by the Free Software Foundation; either
|
||||||
|
version 2.1 of the License, or (at your option) any later version.
|
||||||
|
|
||||||
|
This library is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
Lesser General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU Lesser General Public
|
||||||
|
License along with this library; if not, write to the Free Software
|
||||||
|
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or your
|
||||||
|
school, if any, to sign a "copyright disclaimer" for the library, if
|
||||||
|
necessary. Here is a sample; alter the names:
|
||||||
|
|
||||||
|
Yoyodyne, Inc., hereby disclaims all copyright interest in the
|
||||||
|
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
|
||||||
|
|
||||||
|
<signature of Ty Coon>, 1 April 1990
|
||||||
|
Ty Coon, President of Vice
|
||||||
|
|
||||||
|
That's all there is to it!
|
21
venv/Lib/site-packages/cv2/LICENSE.txt
Normal file
21
venv/Lib/site-packages/cv2/LICENSE.txt
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2016-2018 Olli-Pekka Heinisuo and contributors
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
8
venv/Lib/site-packages/cv2/__init__.py
Normal file
8
venv/Lib/site-packages/cv2/__init__.py
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
import importlib
|
||||||
|
|
||||||
|
from .cv2 import *
|
||||||
|
from .data import *
|
||||||
|
|
||||||
|
# wildcard import above does not import "private" variables like __version__
|
||||||
|
# this makes them available
|
||||||
|
globals().update(importlib.import_module('cv2.cv2').__dict__)
|
BIN
venv/Lib/site-packages/cv2/__pycache__/__init__.cpython-37.pyc
Normal file
BIN
venv/Lib/site-packages/cv2/__pycache__/__init__.cpython-37.pyc
Normal file
Binary file not shown.
BIN
venv/Lib/site-packages/cv2/cv2.cp37-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/cv2/cv2.cp37-win_amd64.pyd
Normal file
Binary file not shown.
3
venv/Lib/site-packages/cv2/data/__init__.py
Normal file
3
venv/Lib/site-packages/cv2/data/__init__.py
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
import os
|
||||||
|
|
||||||
|
haarcascades = os.path.join(os.path.dirname(__file__), '')
|
Binary file not shown.
12213
venv/Lib/site-packages/cv2/data/haarcascade_eye.xml
Normal file
12213
venv/Lib/site-packages/cv2/data/haarcascade_eye.xml
Normal file
File diff suppressed because it is too large
Load Diff
22619
venv/Lib/site-packages/cv2/data/haarcascade_eye_tree_eyeglasses.xml
Normal file
22619
venv/Lib/site-packages/cv2/data/haarcascade_eye_tree_eyeglasses.xml
Normal file
File diff suppressed because it is too large
Load Diff
14382
venv/Lib/site-packages/cv2/data/haarcascade_frontalcatface.xml
Normal file
14382
venv/Lib/site-packages/cv2/data/haarcascade_frontalcatface.xml
Normal file
File diff suppressed because it is too large
Load Diff
13394
venv/Lib/site-packages/cv2/data/haarcascade_frontalcatface_extended.xml
Normal file
13394
venv/Lib/site-packages/cv2/data/haarcascade_frontalcatface_extended.xml
Normal file
File diff suppressed because it is too large
Load Diff
24350
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt.xml
Normal file
24350
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt.xml
Normal file
File diff suppressed because it is too large
Load Diff
20719
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt2.xml
Normal file
20719
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt2.xml
Normal file
File diff suppressed because it is too large
Load Diff
96484
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt_tree.xml
Normal file
96484
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_alt_tree.xml
Normal file
File diff suppressed because it is too large
Load Diff
33314
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml
Normal file
33314
venv/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml
Normal file
File diff suppressed because it is too large
Load Diff
17030
venv/Lib/site-packages/cv2/data/haarcascade_fullbody.xml
Normal file
17030
venv/Lib/site-packages/cv2/data/haarcascade_fullbody.xml
Normal file
File diff suppressed because it is too large
Load Diff
7390
venv/Lib/site-packages/cv2/data/haarcascade_lefteye_2splits.xml
Normal file
7390
venv/Lib/site-packages/cv2/data/haarcascade_lefteye_2splits.xml
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
14056
venv/Lib/site-packages/cv2/data/haarcascade_lowerbody.xml
Normal file
14056
venv/Lib/site-packages/cv2/data/haarcascade_lowerbody.xml
Normal file
File diff suppressed because it is too large
Load Diff
29690
venv/Lib/site-packages/cv2/data/haarcascade_profileface.xml
Normal file
29690
venv/Lib/site-packages/cv2/data/haarcascade_profileface.xml
Normal file
File diff suppressed because it is too large
Load Diff
7407
venv/Lib/site-packages/cv2/data/haarcascade_righteye_2splits.xml
Normal file
7407
venv/Lib/site-packages/cv2/data/haarcascade_righteye_2splits.xml
Normal file
File diff suppressed because it is too large
Load Diff
2656
venv/Lib/site-packages/cv2/data/haarcascade_russian_plate_number.xml
Normal file
2656
venv/Lib/site-packages/cv2/data/haarcascade_russian_plate_number.xml
Normal file
File diff suppressed because it is too large
Load Diff
6729
venv/Lib/site-packages/cv2/data/haarcascade_smile.xml
Normal file
6729
venv/Lib/site-packages/cv2/data/haarcascade_smile.xml
Normal file
File diff suppressed because it is too large
Load Diff
28134
venv/Lib/site-packages/cv2/data/haarcascade_upperbody.xml
Normal file
28134
venv/Lib/site-packages/cv2/data/haarcascade_upperbody.xml
Normal file
File diff suppressed because it is too large
Load Diff
BIN
venv/Lib/site-packages/cv2/opencv_videoio_ffmpeg411_64.dll
Normal file
BIN
venv/Lib/site-packages/cv2/opencv_videoio_ffmpeg411_64.dll
Normal file
Binary file not shown.
2
venv/Lib/site-packages/easy-install.pth
Normal file
2
venv/Lib/site-packages/easy-install.pth
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
./setuptools-40.8.0-py3.7.egg
|
||||||
|
./pip-19.0.3-py3.7.egg
|
107
venv/Lib/site-packages/future-0.18.2-py3.7.egg-info/PKG-INFO
Normal file
107
venv/Lib/site-packages/future-0.18.2-py3.7.egg-info/PKG-INFO
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
Metadata-Version: 1.2
|
||||||
|
Name: future
|
||||||
|
Version: 0.18.2
|
||||||
|
Summary: Clean single-source support for Python 3 and 2
|
||||||
|
Home-page: https://python-future.org
|
||||||
|
Author: Ed Schofield
|
||||||
|
Author-email: ed@pythoncharmers.com
|
||||||
|
License: MIT
|
||||||
|
Description:
|
||||||
|
future: Easy, safe support for Python 2/3 compatibility
|
||||||
|
=======================================================
|
||||||
|
|
||||||
|
``future`` is the missing compatibility layer between Python 2 and Python
|
||||||
|
3. It allows you to use a single, clean Python 3.x-compatible codebase to
|
||||||
|
support both Python 2 and Python 3 with minimal overhead.
|
||||||
|
|
||||||
|
It is designed to be used as follows::
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division,
|
||||||
|
print_function, unicode_literals)
|
||||||
|
from builtins import (
|
||||||
|
bytes, dict, int, list, object, range, str,
|
||||||
|
ascii, chr, hex, input, next, oct, open,
|
||||||
|
pow, round, super,
|
||||||
|
filter, map, zip)
|
||||||
|
|
||||||
|
followed by predominantly standard, idiomatic Python 3 code that then runs
|
||||||
|
similarly on Python 2.6/2.7 and Python 3.3+.
|
||||||
|
|
||||||
|
The imports have no effect on Python 3. On Python 2, they shadow the
|
||||||
|
corresponding builtins, which normally have different semantics on Python 3
|
||||||
|
versus 2, to provide their Python 3 semantics.
|
||||||
|
|
||||||
|
|
||||||
|
Standard library reorganization
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
``future`` supports the standard library reorganization (PEP 3108) through the
|
||||||
|
following Py3 interfaces:
|
||||||
|
|
||||||
|
>>> # Top-level packages with Py3 names provided on Py2:
|
||||||
|
>>> import html.parser
|
||||||
|
>>> import queue
|
||||||
|
>>> import tkinter.dialog
|
||||||
|
>>> import xmlrpc.client
|
||||||
|
>>> # etc.
|
||||||
|
|
||||||
|
>>> # Aliases provided for extensions to existing Py2 module names:
|
||||||
|
>>> from future.standard_library import install_aliases
|
||||||
|
>>> install_aliases()
|
||||||
|
|
||||||
|
>>> from collections import Counter, OrderedDict # backported to Py2.6
|
||||||
|
>>> from collections import UserDict, UserList, UserString
|
||||||
|
>>> import urllib.request
|
||||||
|
>>> from itertools import filterfalse, zip_longest
|
||||||
|
>>> from subprocess import getoutput, getstatusoutput
|
||||||
|
|
||||||
|
|
||||||
|
Automatic conversion
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
An included script called `futurize
|
||||||
|
<http://python-future.org/automatic_conversion.html>`_ aids in converting
|
||||||
|
code (from either Python 2 or Python 3) to code compatible with both
|
||||||
|
platforms. It is similar to ``python-modernize`` but goes further in
|
||||||
|
providing Python 3 compatibility through the use of the backported types
|
||||||
|
and builtin functions in ``future``.
|
||||||
|
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
-------------
|
||||||
|
|
||||||
|
See: http://python-future.org
|
||||||
|
|
||||||
|
|
||||||
|
Credits
|
||||||
|
-------
|
||||||
|
|
||||||
|
:Author: Ed Schofield, Jordan M. Adler, et al
|
||||||
|
:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
|
||||||
|
Ltd, Singapore. http://pythoncharmers.com
|
||||||
|
:Others: See docs/credits.rst or http://python-future.org/credits.html
|
||||||
|
|
||||||
|
|
||||||
|
Licensing
|
||||||
|
---------
|
||||||
|
Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
|
||||||
|
The software is distributed under an MIT licence. See LICENSE.txt.
|
||||||
|
|
||||||
|
|
||||||
|
Keywords: future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2
|
||||||
|
Platform: UNKNOWN
|
||||||
|
Classifier: Programming Language :: Python
|
||||||
|
Classifier: Programming Language :: Python :: 2
|
||||||
|
Classifier: Programming Language :: Python :: 2.6
|
||||||
|
Classifier: Programming Language :: Python :: 2.7
|
||||||
|
Classifier: Programming Language :: Python :: 3
|
||||||
|
Classifier: Programming Language :: Python :: 3.3
|
||||||
|
Classifier: Programming Language :: Python :: 3.4
|
||||||
|
Classifier: Programming Language :: Python :: 3.5
|
||||||
|
Classifier: Programming Language :: Python :: 3.6
|
||||||
|
Classifier: Programming Language :: Python :: 3.7
|
||||||
|
Classifier: License :: OSI Approved
|
||||||
|
Classifier: License :: OSI Approved :: MIT License
|
||||||
|
Classifier: Development Status :: 4 - Beta
|
||||||
|
Classifier: Intended Audience :: Developers
|
||||||
|
Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
|
391
venv/Lib/site-packages/future-0.18.2-py3.7.egg-info/SOURCES.txt
Normal file
391
venv/Lib/site-packages/future-0.18.2-py3.7.egg-info/SOURCES.txt
Normal file
@ -0,0 +1,391 @@
|
|||||||
|
.travis.yml
|
||||||
|
LICENSE.txt
|
||||||
|
MANIFEST.in
|
||||||
|
README.rst
|
||||||
|
TESTING.txt
|
||||||
|
check_rst.sh
|
||||||
|
futurize.py
|
||||||
|
pasteurize.py
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
pytest.ini
|
||||||
|
setup.cfg
|
||||||
|
setup.py
|
||||||
|
docs/Makefile
|
||||||
|
docs/automatic_conversion.rst
|
||||||
|
docs/bind_method.rst
|
||||||
|
docs/bytes_object.rst
|
||||||
|
docs/changelog.rst
|
||||||
|
docs/compatible_idioms.rst
|
||||||
|
docs/conf.py
|
||||||
|
docs/contents.rst.inc
|
||||||
|
docs/conversion_limitations.rst
|
||||||
|
docs/credits.rst
|
||||||
|
docs/custom_iterators.rst
|
||||||
|
docs/custom_str_methods.rst
|
||||||
|
docs/dev_notes.rst
|
||||||
|
docs/development.rst
|
||||||
|
docs/dict_object.rst
|
||||||
|
docs/faq.rst
|
||||||
|
docs/func_annotations.rst
|
||||||
|
docs/future-builtins.rst
|
||||||
|
docs/futureext.py
|
||||||
|
docs/futurize.rst
|
||||||
|
docs/futurize_cheatsheet.rst
|
||||||
|
docs/futurize_overview.rst
|
||||||
|
docs/hindsight.rst
|
||||||
|
docs/imports.rst
|
||||||
|
docs/index.rst
|
||||||
|
docs/int_object.rst
|
||||||
|
docs/isinstance.rst
|
||||||
|
docs/limitations.rst
|
||||||
|
docs/metaclasses.rst
|
||||||
|
docs/older_interfaces.rst
|
||||||
|
docs/open_function.rst
|
||||||
|
docs/overview.rst
|
||||||
|
docs/pasteurize.rst
|
||||||
|
docs/quickstart.rst
|
||||||
|
docs/reference.rst
|
||||||
|
docs/roadmap.rst
|
||||||
|
docs/standard_library_imports.rst
|
||||||
|
docs/stdlib_incompatibilities.rst
|
||||||
|
docs/str_object.rst
|
||||||
|
docs/translation.rst
|
||||||
|
docs/unicode_literals.rst
|
||||||
|
docs/upgrading.rst
|
||||||
|
docs/utilities.rst
|
||||||
|
docs/what_else.rst
|
||||||
|
docs/whatsnew.rst
|
||||||
|
docs/why_python3.rst
|
||||||
|
docs/3rd-party-py3k-compat-code/astropy_py3compat.py
|
||||||
|
docs/3rd-party-py3k-compat-code/django_utils_encoding.py
|
||||||
|
docs/3rd-party-py3k-compat-code/gevent_py3k.py
|
||||||
|
docs/3rd-party-py3k-compat-code/ipython_py3compat.py
|
||||||
|
docs/3rd-party-py3k-compat-code/jinja2_compat.py
|
||||||
|
docs/3rd-party-py3k-compat-code/numpy_py3k.py
|
||||||
|
docs/3rd-party-py3k-compat-code/pandas_py3k.py
|
||||||
|
docs/3rd-party-py3k-compat-code/pycrypto_py3compat.py
|
||||||
|
docs/3rd-party-py3k-compat-code/statsmodels_py3k.py
|
||||||
|
docs/_static/python-future-icon-32.ico
|
||||||
|
docs/_static/python-future-icon-white-32.ico
|
||||||
|
docs/_static/python-future-logo-textless-transparent.png
|
||||||
|
docs/_static/python-future-logo.png
|
||||||
|
docs/_static/python-future-logo.tiff
|
||||||
|
docs/_templates/layout.html
|
||||||
|
docs/_templates/navbar.html
|
||||||
|
docs/_templates/sidebarintro.html
|
||||||
|
docs/_templates/sidebarlogo.html
|
||||||
|
docs/_templates/sidebartoc.html
|
||||||
|
docs/_themes/LICENSE
|
||||||
|
docs/_themes/README
|
||||||
|
docs/_themes/future/layout.html
|
||||||
|
docs/_themes/future/relations.html
|
||||||
|
docs/_themes/future/theme.conf
|
||||||
|
docs/_themes/future/static/future.css_t
|
||||||
|
docs/notebooks/Writing Python 2-3 compatible code.ipynb
|
||||||
|
docs/notebooks/bytes object.ipynb
|
||||||
|
docs/notebooks/object special methods (next, bool, ...).ipynb
|
||||||
|
docs/other/auto2to3.py
|
||||||
|
docs/other/find_pattern.py
|
||||||
|
docs/other/fix_notebook_html_colour.py
|
||||||
|
docs/other/lessons.txt
|
||||||
|
docs/other/todo.txt
|
||||||
|
docs/other/upload_future_docs.sh
|
||||||
|
docs/other/useful_links.txt
|
||||||
|
src/__init__.py
|
||||||
|
src/_dummy_thread/__init__.py
|
||||||
|
src/_markupbase/__init__.py
|
||||||
|
src/_thread/__init__.py
|
||||||
|
src/builtins/__init__.py
|
||||||
|
src/copyreg/__init__.py
|
||||||
|
src/future/__init__.py
|
||||||
|
src/future.egg-info/PKG-INFO
|
||||||
|
src/future.egg-info/SOURCES.txt
|
||||||
|
src/future.egg-info/dependency_links.txt
|
||||||
|
src/future.egg-info/entry_points.txt
|
||||||
|
src/future.egg-info/top_level.txt
|
||||||
|
src/future/backports/__init__.py
|
||||||
|
src/future/backports/_markupbase.py
|
||||||
|
src/future/backports/datetime.py
|
||||||
|
src/future/backports/misc.py
|
||||||
|
src/future/backports/socket.py
|
||||||
|
src/future/backports/socketserver.py
|
||||||
|
src/future/backports/total_ordering.py
|
||||||
|
src/future/backports/email/__init__.py
|
||||||
|
src/future/backports/email/_encoded_words.py
|
||||||
|
src/future/backports/email/_header_value_parser.py
|
||||||
|
src/future/backports/email/_parseaddr.py
|
||||||
|
src/future/backports/email/_policybase.py
|
||||||
|
src/future/backports/email/base64mime.py
|
||||||
|
src/future/backports/email/charset.py
|
||||||
|
src/future/backports/email/encoders.py
|
||||||
|
src/future/backports/email/errors.py
|
||||||
|
src/future/backports/email/feedparser.py
|
||||||
|
src/future/backports/email/generator.py
|
||||||
|
src/future/backports/email/header.py
|
||||||
|
src/future/backports/email/headerregistry.py
|
||||||
|
src/future/backports/email/iterators.py
|
||||||
|
src/future/backports/email/message.py
|
||||||
|
src/future/backports/email/parser.py
|
||||||
|
src/future/backports/email/policy.py
|
||||||
|
src/future/backports/email/quoprimime.py
|
||||||
|
src/future/backports/email/utils.py
|
||||||
|
src/future/backports/email/mime/__init__.py
|
||||||
|
src/future/backports/email/mime/application.py
|
||||||
|
src/future/backports/email/mime/audio.py
|
||||||
|
src/future/backports/email/mime/base.py
|
||||||
|
src/future/backports/email/mime/image.py
|
||||||
|
src/future/backports/email/mime/message.py
|
||||||
|
src/future/backports/email/mime/multipart.py
|
||||||
|
src/future/backports/email/mime/nonmultipart.py
|
||||||
|
src/future/backports/email/mime/text.py
|
||||||
|
src/future/backports/html/__init__.py
|
||||||
|
src/future/backports/html/entities.py
|
||||||
|
src/future/backports/html/parser.py
|
||||||
|
src/future/backports/http/__init__.py
|
||||||
|
src/future/backports/http/client.py
|
||||||
|
src/future/backports/http/cookiejar.py
|
||||||
|
src/future/backports/http/cookies.py
|
||||||
|
src/future/backports/http/server.py
|
||||||
|
src/future/backports/test/__init__.py
|
||||||
|
src/future/backports/test/badcert.pem
|
||||||
|
src/future/backports/test/badkey.pem
|
||||||
|
src/future/backports/test/dh512.pem
|
||||||
|
src/future/backports/test/https_svn_python_org_root.pem
|
||||||
|
src/future/backports/test/keycert.passwd.pem
|
||||||
|
src/future/backports/test/keycert.pem
|
||||||
|
src/future/backports/test/keycert2.pem
|
||||||
|
src/future/backports/test/nokia.pem
|
||||||
|
src/future/backports/test/nullbytecert.pem
|
||||||
|
src/future/backports/test/nullcert.pem
|
||||||
|
src/future/backports/test/pystone.py
|
||||||
|
src/future/backports/test/sha256.pem
|
||||||
|
src/future/backports/test/ssl_cert.pem
|
||||||
|
src/future/backports/test/ssl_key.passwd.pem
|
||||||
|
src/future/backports/test/ssl_key.pem
|
||||||
|
src/future/backports/test/ssl_servers.py
|
||||||
|
src/future/backports/test/support.py
|
||||||
|
src/future/backports/urllib/__init__.py
|
||||||
|
src/future/backports/urllib/error.py
|
||||||
|
src/future/backports/urllib/parse.py
|
||||||
|
src/future/backports/urllib/request.py
|
||||||
|
src/future/backports/urllib/response.py
|
||||||
|
src/future/backports/urllib/robotparser.py
|
||||||
|
src/future/backports/xmlrpc/__init__.py
|
||||||
|
src/future/backports/xmlrpc/client.py
|
||||||
|
src/future/backports/xmlrpc/server.py
|
||||||
|
src/future/builtins/__init__.py
|
||||||
|
src/future/builtins/disabled.py
|
||||||
|
src/future/builtins/iterators.py
|
||||||
|
src/future/builtins/misc.py
|
||||||
|
src/future/builtins/new_min_max.py
|
||||||
|
src/future/builtins/newnext.py
|
||||||
|
src/future/builtins/newround.py
|
||||||
|
src/future/builtins/newsuper.py
|
||||||
|
src/future/moves/__init__.py
|
||||||
|
src/future/moves/_dummy_thread.py
|
||||||
|
src/future/moves/_markupbase.py
|
||||||
|
src/future/moves/_thread.py
|
||||||
|
src/future/moves/builtins.py
|
||||||
|
src/future/moves/collections.py
|
||||||
|
src/future/moves/configparser.py
|
||||||
|
src/future/moves/copyreg.py
|
||||||
|
src/future/moves/itertools.py
|
||||||
|
src/future/moves/pickle.py
|
||||||
|
src/future/moves/queue.py
|
||||||
|
src/future/moves/reprlib.py
|
||||||
|
src/future/moves/socketserver.py
|
||||||
|
src/future/moves/subprocess.py
|
||||||
|
src/future/moves/sys.py
|
||||||
|
src/future/moves/winreg.py
|
||||||
|
src/future/moves/dbm/__init__.py
|
||||||
|
src/future/moves/dbm/dumb.py
|
||||||
|
src/future/moves/dbm/gnu.py
|
||||||
|
src/future/moves/dbm/ndbm.py
|
||||||
|
src/future/moves/html/__init__.py
|
||||||
|
src/future/moves/html/entities.py
|
||||||
|
src/future/moves/html/parser.py
|
||||||
|
src/future/moves/http/__init__.py
|
||||||
|
src/future/moves/http/client.py
|
||||||
|
src/future/moves/http/cookiejar.py
|
||||||
|
src/future/moves/http/cookies.py
|
||||||
|
src/future/moves/http/server.py
|
||||||
|
src/future/moves/test/__init__.py
|
||||||
|
src/future/moves/test/support.py
|
||||||
|
src/future/moves/tkinter/__init__.py
|
||||||
|
src/future/moves/tkinter/colorchooser.py
|
||||||
|
src/future/moves/tkinter/commondialog.py
|
||||||
|
src/future/moves/tkinter/constants.py
|
||||||
|
src/future/moves/tkinter/dialog.py
|
||||||
|
src/future/moves/tkinter/dnd.py
|
||||||
|
src/future/moves/tkinter/filedialog.py
|
||||||
|
src/future/moves/tkinter/font.py
|
||||||
|
src/future/moves/tkinter/messagebox.py
|
||||||
|
src/future/moves/tkinter/scrolledtext.py
|
||||||
|
src/future/moves/tkinter/simpledialog.py
|
||||||
|
src/future/moves/tkinter/tix.py
|
||||||
|
src/future/moves/tkinter/ttk.py
|
||||||
|
src/future/moves/urllib/__init__.py
|
||||||
|
src/future/moves/urllib/error.py
|
||||||
|
src/future/moves/urllib/parse.py
|
||||||
|
src/future/moves/urllib/request.py
|
||||||
|
src/future/moves/urllib/response.py
|
||||||
|
src/future/moves/urllib/robotparser.py
|
||||||
|
src/future/moves/xmlrpc/__init__.py
|
||||||
|
src/future/moves/xmlrpc/client.py
|
||||||
|
src/future/moves/xmlrpc/server.py
|
||||||
|
src/future/standard_library/__init__.py
|
||||||
|
src/future/tests/__init__.py
|
||||||
|
src/future/tests/base.py
|
||||||
|
src/future/types/__init__.py
|
||||||
|
src/future/types/newbytes.py
|
||||||
|
src/future/types/newdict.py
|
||||||
|
src/future/types/newint.py
|
||||||
|
src/future/types/newlist.py
|
||||||
|
src/future/types/newmemoryview.py
|
||||||
|
src/future/types/newobject.py
|
||||||
|
src/future/types/newopen.py
|
||||||
|
src/future/types/newrange.py
|
||||||
|
src/future/types/newstr.py
|
||||||
|
src/future/utils/__init__.py
|
||||||
|
src/future/utils/surrogateescape.py
|
||||||
|
src/html/__init__.py
|
||||||
|
src/html/entities.py
|
||||||
|
src/html/parser.py
|
||||||
|
src/http/__init__.py
|
||||||
|
src/http/client.py
|
||||||
|
src/http/cookiejar.py
|
||||||
|
src/http/cookies.py
|
||||||
|
src/http/server.py
|
||||||
|
src/libfuturize/__init__.py
|
||||||
|
src/libfuturize/fixer_util.py
|
||||||
|
src/libfuturize/main.py
|
||||||
|
src/libfuturize/fixes/__init__.py
|
||||||
|
src/libfuturize/fixes/fix_UserDict.py
|
||||||
|
src/libfuturize/fixes/fix_absolute_import.py
|
||||||
|
src/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
|
||||||
|
src/libfuturize/fixes/fix_basestring.py
|
||||||
|
src/libfuturize/fixes/fix_bytes.py
|
||||||
|
src/libfuturize/fixes/fix_cmp.py
|
||||||
|
src/libfuturize/fixes/fix_division.py
|
||||||
|
src/libfuturize/fixes/fix_division_safe.py
|
||||||
|
src/libfuturize/fixes/fix_execfile.py
|
||||||
|
src/libfuturize/fixes/fix_future_builtins.py
|
||||||
|
src/libfuturize/fixes/fix_future_standard_library.py
|
||||||
|
src/libfuturize/fixes/fix_future_standard_library_urllib.py
|
||||||
|
src/libfuturize/fixes/fix_input.py
|
||||||
|
src/libfuturize/fixes/fix_metaclass.py
|
||||||
|
src/libfuturize/fixes/fix_next_call.py
|
||||||
|
src/libfuturize/fixes/fix_object.py
|
||||||
|
src/libfuturize/fixes/fix_oldstr_wrap.py
|
||||||
|
src/libfuturize/fixes/fix_order___future__imports.py
|
||||||
|
src/libfuturize/fixes/fix_print.py
|
||||||
|
src/libfuturize/fixes/fix_print_with_import.py
|
||||||
|
src/libfuturize/fixes/fix_raise.py
|
||||||
|
src/libfuturize/fixes/fix_remove_old__future__imports.py
|
||||||
|
src/libfuturize/fixes/fix_unicode_keep_u.py
|
||||||
|
src/libfuturize/fixes/fix_unicode_literals_import.py
|
||||||
|
src/libfuturize/fixes/fix_xrange_with_import.py
|
||||||
|
src/libpasteurize/__init__.py
|
||||||
|
src/libpasteurize/main.py
|
||||||
|
src/libpasteurize/fixes/__init__.py
|
||||||
|
src/libpasteurize/fixes/feature_base.py
|
||||||
|
src/libpasteurize/fixes/fix_add_all__future__imports.py
|
||||||
|
src/libpasteurize/fixes/fix_add_all_future_builtins.py
|
||||||
|
src/libpasteurize/fixes/fix_add_future_standard_library_import.py
|
||||||
|
src/libpasteurize/fixes/fix_annotations.py
|
||||||
|
src/libpasteurize/fixes/fix_division.py
|
||||||
|
src/libpasteurize/fixes/fix_features.py
|
||||||
|
src/libpasteurize/fixes/fix_fullargspec.py
|
||||||
|
src/libpasteurize/fixes/fix_future_builtins.py
|
||||||
|
src/libpasteurize/fixes/fix_getcwd.py
|
||||||
|
src/libpasteurize/fixes/fix_imports.py
|
||||||
|
src/libpasteurize/fixes/fix_imports2.py
|
||||||
|
src/libpasteurize/fixes/fix_kwargs.py
|
||||||
|
src/libpasteurize/fixes/fix_memoryview.py
|
||||||
|
src/libpasteurize/fixes/fix_metaclass.py
|
||||||
|
src/libpasteurize/fixes/fix_newstyle.py
|
||||||
|
src/libpasteurize/fixes/fix_next.py
|
||||||
|
src/libpasteurize/fixes/fix_printfunction.py
|
||||||
|
src/libpasteurize/fixes/fix_raise.py
|
||||||
|
src/libpasteurize/fixes/fix_raise_.py
|
||||||
|
src/libpasteurize/fixes/fix_throw.py
|
||||||
|
src/libpasteurize/fixes/fix_unpacking.py
|
||||||
|
src/past/__init__.py
|
||||||
|
src/past/builtins/__init__.py
|
||||||
|
src/past/builtins/misc.py
|
||||||
|
src/past/builtins/noniterators.py
|
||||||
|
src/past/translation/__init__.py
|
||||||
|
src/past/types/__init__.py
|
||||||
|
src/past/types/basestring.py
|
||||||
|
src/past/types/olddict.py
|
||||||
|
src/past/types/oldstr.py
|
||||||
|
src/past/utils/__init__.py
|
||||||
|
src/queue/__init__.py
|
||||||
|
src/reprlib/__init__.py
|
||||||
|
src/socketserver/__init__.py
|
||||||
|
src/tkinter/__init__.py
|
||||||
|
src/tkinter/colorchooser.py
|
||||||
|
src/tkinter/commondialog.py
|
||||||
|
src/tkinter/constants.py
|
||||||
|
src/tkinter/dialog.py
|
||||||
|
src/tkinter/dnd.py
|
||||||
|
src/tkinter/filedialog.py
|
||||||
|
src/tkinter/font.py
|
||||||
|
src/tkinter/messagebox.py
|
||||||
|
src/tkinter/scrolledtext.py
|
||||||
|
src/tkinter/simpledialog.py
|
||||||
|
src/tkinter/tix.py
|
||||||
|
src/tkinter/ttk.py
|
||||||
|
src/winreg/__init__.py
|
||||||
|
src/xmlrpc/__init__.py
|
||||||
|
src/xmlrpc/client.py
|
||||||
|
src/xmlrpc/server.py
|
||||||
|
tests/test_future/__init__.py
|
||||||
|
tests/test_future/test_backports.py
|
||||||
|
tests/test_future/test_buffer.py
|
||||||
|
tests/test_future/test_builtins.py
|
||||||
|
tests/test_future/test_builtins_explicit_import.py
|
||||||
|
tests/test_future/test_bytes.py
|
||||||
|
tests/test_future/test_chainmap.py
|
||||||
|
tests/test_future/test_common_iterators.py
|
||||||
|
tests/test_future/test_decorators.py
|
||||||
|
tests/test_future/test_dict.py
|
||||||
|
tests/test_future/test_email_multipart.py
|
||||||
|
tests/test_future/test_explicit_imports.py
|
||||||
|
tests/test_future/test_futurize.py
|
||||||
|
tests/test_future/test_html.py
|
||||||
|
tests/test_future/test_htmlparser.py
|
||||||
|
tests/test_future/test_http_cookiejar.py
|
||||||
|
tests/test_future/test_httplib.py
|
||||||
|
tests/test_future/test_import_star.py
|
||||||
|
tests/test_future/test_imports_httplib.py
|
||||||
|
tests/test_future/test_imports_urllib.py
|
||||||
|
tests/test_future/test_int.py
|
||||||
|
tests/test_future/test_int_old_division.py
|
||||||
|
tests/test_future/test_isinstance.py
|
||||||
|
tests/test_future/test_libfuturize_fixers.py
|
||||||
|
tests/test_future/test_list.py
|
||||||
|
tests/test_future/test_magicsuper.py
|
||||||
|
tests/test_future/test_object.py
|
||||||
|
tests/test_future/test_pasteurize.py
|
||||||
|
tests/test_future/test_py2_str_literals_to_bytes.py
|
||||||
|
tests/test_future/test_range.py
|
||||||
|
tests/test_future/test_requests.py
|
||||||
|
tests/test_future/test_standard_library.py
|
||||||
|
tests/test_future/test_str.py
|
||||||
|
tests/test_future/test_super.py
|
||||||
|
tests/test_future/test_surrogateescape.py
|
||||||
|
tests/test_future/test_urllib.py
|
||||||
|
tests/test_future/test_urllib2.py
|
||||||
|
tests/test_future/test_urllib_response.py
|
||||||
|
tests/test_future/test_urllib_toplevel.py
|
||||||
|
tests/test_future/test_urllibnet.py
|
||||||
|
tests/test_future/test_urlparse.py
|
||||||
|
tests/test_future/test_utils.py
|
||||||
|
tests/test_past/__init__.py
|
||||||
|
tests/test_past/test_basestring.py
|
||||||
|
tests/test_past/test_builtins.py
|
||||||
|
tests/test_past/test_noniterators.py
|
||||||
|
tests/test_past/test_olddict.py
|
||||||
|
tests/test_past/test_oldstr.py
|
||||||
|
tests/test_past/test_translation.py
|
@ -0,0 +1 @@
|
|||||||
|
|
@ -0,0 +1,4 @@
|
|||||||
|
[console_scripts]
|
||||||
|
futurize = libfuturize.main:main
|
||||||
|
pasteurize = libpasteurize.main:main
|
||||||
|
|
@ -0,0 +1,415 @@
|
|||||||
|
..\..\..\Scripts\futurize-script.py
|
||||||
|
..\..\..\Scripts\futurize.exe
|
||||||
|
..\..\..\Scripts\pasteurize-script.py
|
||||||
|
..\..\..\Scripts\pasteurize.exe
|
||||||
|
..\future\__init__.py
|
||||||
|
..\future\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\__init__.py
|
||||||
|
..\future\backports\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\_markupbase.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\datetime.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\misc.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\socket.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\socketserver.cpython-37.pyc
|
||||||
|
..\future\backports\__pycache__\total_ordering.cpython-37.pyc
|
||||||
|
..\future\backports\_markupbase.py
|
||||||
|
..\future\backports\datetime.py
|
||||||
|
..\future\backports\email\__init__.py
|
||||||
|
..\future\backports\email\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\_encoded_words.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\_header_value_parser.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\_parseaddr.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\_policybase.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\base64mime.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\charset.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\encoders.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\errors.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\feedparser.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\generator.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\header.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\headerregistry.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\iterators.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\message.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\parser.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\policy.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\quoprimime.cpython-37.pyc
|
||||||
|
..\future\backports\email\__pycache__\utils.cpython-37.pyc
|
||||||
|
..\future\backports\email\_encoded_words.py
|
||||||
|
..\future\backports\email\_header_value_parser.py
|
||||||
|
..\future\backports\email\_parseaddr.py
|
||||||
|
..\future\backports\email\_policybase.py
|
||||||
|
..\future\backports\email\base64mime.py
|
||||||
|
..\future\backports\email\charset.py
|
||||||
|
..\future\backports\email\encoders.py
|
||||||
|
..\future\backports\email\errors.py
|
||||||
|
..\future\backports\email\feedparser.py
|
||||||
|
..\future\backports\email\generator.py
|
||||||
|
..\future\backports\email\header.py
|
||||||
|
..\future\backports\email\headerregistry.py
|
||||||
|
..\future\backports\email\iterators.py
|
||||||
|
..\future\backports\email\message.py
|
||||||
|
..\future\backports\email\mime\__init__.py
|
||||||
|
..\future\backports\email\mime\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\application.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\audio.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\base.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\image.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\message.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\multipart.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\nonmultipart.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\__pycache__\text.cpython-37.pyc
|
||||||
|
..\future\backports\email\mime\application.py
|
||||||
|
..\future\backports\email\mime\audio.py
|
||||||
|
..\future\backports\email\mime\base.py
|
||||||
|
..\future\backports\email\mime\image.py
|
||||||
|
..\future\backports\email\mime\message.py
|
||||||
|
..\future\backports\email\mime\multipart.py
|
||||||
|
..\future\backports\email\mime\nonmultipart.py
|
||||||
|
..\future\backports\email\mime\text.py
|
||||||
|
..\future\backports\email\parser.py
|
||||||
|
..\future\backports\email\policy.py
|
||||||
|
..\future\backports\email\quoprimime.py
|
||||||
|
..\future\backports\email\utils.py
|
||||||
|
..\future\backports\html\__init__.py
|
||||||
|
..\future\backports\html\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\html\__pycache__\entities.cpython-37.pyc
|
||||||
|
..\future\backports\html\__pycache__\parser.cpython-37.pyc
|
||||||
|
..\future\backports\html\entities.py
|
||||||
|
..\future\backports\html\parser.py
|
||||||
|
..\future\backports\http\__init__.py
|
||||||
|
..\future\backports\http\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\http\__pycache__\client.cpython-37.pyc
|
||||||
|
..\future\backports\http\__pycache__\cookiejar.cpython-37.pyc
|
||||||
|
..\future\backports\http\__pycache__\cookies.cpython-37.pyc
|
||||||
|
..\future\backports\http\__pycache__\server.cpython-37.pyc
|
||||||
|
..\future\backports\http\client.py
|
||||||
|
..\future\backports\http\cookiejar.py
|
||||||
|
..\future\backports\http\cookies.py
|
||||||
|
..\future\backports\http\server.py
|
||||||
|
..\future\backports\misc.py
|
||||||
|
..\future\backports\socket.py
|
||||||
|
..\future\backports\socketserver.py
|
||||||
|
..\future\backports\test\__init__.py
|
||||||
|
..\future\backports\test\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\test\__pycache__\pystone.cpython-37.pyc
|
||||||
|
..\future\backports\test\__pycache__\ssl_servers.cpython-37.pyc
|
||||||
|
..\future\backports\test\__pycache__\support.cpython-37.pyc
|
||||||
|
..\future\backports\test\badcert.pem
|
||||||
|
..\future\backports\test\badkey.pem
|
||||||
|
..\future\backports\test\dh512.pem
|
||||||
|
..\future\backports\test\https_svn_python_org_root.pem
|
||||||
|
..\future\backports\test\keycert.passwd.pem
|
||||||
|
..\future\backports\test\keycert.pem
|
||||||
|
..\future\backports\test\keycert2.pem
|
||||||
|
..\future\backports\test\nokia.pem
|
||||||
|
..\future\backports\test\nullbytecert.pem
|
||||||
|
..\future\backports\test\nullcert.pem
|
||||||
|
..\future\backports\test\pystone.py
|
||||||
|
..\future\backports\test\sha256.pem
|
||||||
|
..\future\backports\test\ssl_cert.pem
|
||||||
|
..\future\backports\test\ssl_key.passwd.pem
|
||||||
|
..\future\backports\test\ssl_key.pem
|
||||||
|
..\future\backports\test\ssl_servers.py
|
||||||
|
..\future\backports\test\support.py
|
||||||
|
..\future\backports\total_ordering.py
|
||||||
|
..\future\backports\urllib\__init__.py
|
||||||
|
..\future\backports\urllib\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\__pycache__\error.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\__pycache__\parse.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\__pycache__\request.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\__pycache__\response.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\__pycache__\robotparser.cpython-37.pyc
|
||||||
|
..\future\backports\urllib\error.py
|
||||||
|
..\future\backports\urllib\parse.py
|
||||||
|
..\future\backports\urllib\request.py
|
||||||
|
..\future\backports\urllib\response.py
|
||||||
|
..\future\backports\urllib\robotparser.py
|
||||||
|
..\future\backports\xmlrpc\__init__.py
|
||||||
|
..\future\backports\xmlrpc\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\backports\xmlrpc\__pycache__\client.cpython-37.pyc
|
||||||
|
..\future\backports\xmlrpc\__pycache__\server.cpython-37.pyc
|
||||||
|
..\future\backports\xmlrpc\client.py
|
||||||
|
..\future\backports\xmlrpc\server.py
|
||||||
|
..\future\builtins\__init__.py
|
||||||
|
..\future\builtins\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\disabled.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\iterators.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\misc.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\new_min_max.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\newnext.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\newround.cpython-37.pyc
|
||||||
|
..\future\builtins\__pycache__\newsuper.cpython-37.pyc
|
||||||
|
..\future\builtins\disabled.py
|
||||||
|
..\future\builtins\iterators.py
|
||||||
|
..\future\builtins\misc.py
|
||||||
|
..\future\builtins\new_min_max.py
|
||||||
|
..\future\builtins\newnext.py
|
||||||
|
..\future\builtins\newround.py
|
||||||
|
..\future\builtins\newsuper.py
|
||||||
|
..\future\moves\__init__.py
|
||||||
|
..\future\moves\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\_dummy_thread.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\_markupbase.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\_thread.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\builtins.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\collections.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\configparser.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\copyreg.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\itertools.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\pickle.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\queue.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\reprlib.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\socketserver.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\subprocess.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\sys.cpython-37.pyc
|
||||||
|
..\future\moves\__pycache__\winreg.cpython-37.pyc
|
||||||
|
..\future\moves\_dummy_thread.py
|
||||||
|
..\future\moves\_markupbase.py
|
||||||
|
..\future\moves\_thread.py
|
||||||
|
..\future\moves\builtins.py
|
||||||
|
..\future\moves\collections.py
|
||||||
|
..\future\moves\configparser.py
|
||||||
|
..\future\moves\copyreg.py
|
||||||
|
..\future\moves\dbm\__init__.py
|
||||||
|
..\future\moves\dbm\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\dbm\__pycache__\dumb.cpython-37.pyc
|
||||||
|
..\future\moves\dbm\__pycache__\gnu.cpython-37.pyc
|
||||||
|
..\future\moves\dbm\__pycache__\ndbm.cpython-37.pyc
|
||||||
|
..\future\moves\dbm\dumb.py
|
||||||
|
..\future\moves\dbm\gnu.py
|
||||||
|
..\future\moves\dbm\ndbm.py
|
||||||
|
..\future\moves\html\__init__.py
|
||||||
|
..\future\moves\html\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\html\__pycache__\entities.cpython-37.pyc
|
||||||
|
..\future\moves\html\__pycache__\parser.cpython-37.pyc
|
||||||
|
..\future\moves\html\entities.py
|
||||||
|
..\future\moves\html\parser.py
|
||||||
|
..\future\moves\http\__init__.py
|
||||||
|
..\future\moves\http\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\http\__pycache__\client.cpython-37.pyc
|
||||||
|
..\future\moves\http\__pycache__\cookiejar.cpython-37.pyc
|
||||||
|
..\future\moves\http\__pycache__\cookies.cpython-37.pyc
|
||||||
|
..\future\moves\http\__pycache__\server.cpython-37.pyc
|
||||||
|
..\future\moves\http\client.py
|
||||||
|
..\future\moves\http\cookiejar.py
|
||||||
|
..\future\moves\http\cookies.py
|
||||||
|
..\future\moves\http\server.py
|
||||||
|
..\future\moves\itertools.py
|
||||||
|
..\future\moves\pickle.py
|
||||||
|
..\future\moves\queue.py
|
||||||
|
..\future\moves\reprlib.py
|
||||||
|
..\future\moves\socketserver.py
|
||||||
|
..\future\moves\subprocess.py
|
||||||
|
..\future\moves\sys.py
|
||||||
|
..\future\moves\test\__init__.py
|
||||||
|
..\future\moves\test\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\test\__pycache__\support.cpython-37.pyc
|
||||||
|
..\future\moves\test\support.py
|
||||||
|
..\future\moves\tkinter\__init__.py
|
||||||
|
..\future\moves\tkinter\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\colorchooser.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\commondialog.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\constants.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\dialog.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\dnd.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\filedialog.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\font.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\messagebox.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\scrolledtext.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\simpledialog.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\tix.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\__pycache__\ttk.cpython-37.pyc
|
||||||
|
..\future\moves\tkinter\colorchooser.py
|
||||||
|
..\future\moves\tkinter\commondialog.py
|
||||||
|
..\future\moves\tkinter\constants.py
|
||||||
|
..\future\moves\tkinter\dialog.py
|
||||||
|
..\future\moves\tkinter\dnd.py
|
||||||
|
..\future\moves\tkinter\filedialog.py
|
||||||
|
..\future\moves\tkinter\font.py
|
||||||
|
..\future\moves\tkinter\messagebox.py
|
||||||
|
..\future\moves\tkinter\scrolledtext.py
|
||||||
|
..\future\moves\tkinter\simpledialog.py
|
||||||
|
..\future\moves\tkinter\tix.py
|
||||||
|
..\future\moves\tkinter\ttk.py
|
||||||
|
..\future\moves\urllib\__init__.py
|
||||||
|
..\future\moves\urllib\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\__pycache__\error.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\__pycache__\parse.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\__pycache__\request.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\__pycache__\response.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\__pycache__\robotparser.cpython-37.pyc
|
||||||
|
..\future\moves\urllib\error.py
|
||||||
|
..\future\moves\urllib\parse.py
|
||||||
|
..\future\moves\urllib\request.py
|
||||||
|
..\future\moves\urllib\response.py
|
||||||
|
..\future\moves\urllib\robotparser.py
|
||||||
|
..\future\moves\winreg.py
|
||||||
|
..\future\moves\xmlrpc\__init__.py
|
||||||
|
..\future\moves\xmlrpc\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\moves\xmlrpc\__pycache__\client.cpython-37.pyc
|
||||||
|
..\future\moves\xmlrpc\__pycache__\server.cpython-37.pyc
|
||||||
|
..\future\moves\xmlrpc\client.py
|
||||||
|
..\future\moves\xmlrpc\server.py
|
||||||
|
..\future\standard_library\__init__.py
|
||||||
|
..\future\standard_library\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\tests\__init__.py
|
||||||
|
..\future\tests\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\tests\__pycache__\base.cpython-37.pyc
|
||||||
|
..\future\tests\base.py
|
||||||
|
..\future\types\__init__.py
|
||||||
|
..\future\types\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newbytes.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newdict.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newint.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newlist.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newmemoryview.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newobject.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newopen.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newrange.cpython-37.pyc
|
||||||
|
..\future\types\__pycache__\newstr.cpython-37.pyc
|
||||||
|
..\future\types\newbytes.py
|
||||||
|
..\future\types\newdict.py
|
||||||
|
..\future\types\newint.py
|
||||||
|
..\future\types\newlist.py
|
||||||
|
..\future\types\newmemoryview.py
|
||||||
|
..\future\types\newobject.py
|
||||||
|
..\future\types\newopen.py
|
||||||
|
..\future\types\newrange.py
|
||||||
|
..\future\types\newstr.py
|
||||||
|
..\future\utils\__init__.py
|
||||||
|
..\future\utils\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\future\utils\__pycache__\surrogateescape.cpython-37.pyc
|
||||||
|
..\future\utils\surrogateescape.py
|
||||||
|
..\libfuturize\__init__.py
|
||||||
|
..\libfuturize\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\libfuturize\__pycache__\fixer_util.cpython-37.pyc
|
||||||
|
..\libfuturize\__pycache__\main.cpython-37.pyc
|
||||||
|
..\libfuturize\fixer_util.py
|
||||||
|
..\libfuturize\fixes\__init__.py
|
||||||
|
..\libfuturize\fixes\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_UserDict.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_absolute_import.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_add__future__imports_except_unicode_literals.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_basestring.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_bytes.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_cmp.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_division.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_division_safe.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_execfile.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_future_builtins.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_future_standard_library.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_future_standard_library_urllib.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_input.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_metaclass.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_next_call.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_object.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_oldstr_wrap.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_order___future__imports.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_print.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_print_with_import.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_raise.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_remove_old__future__imports.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_unicode_keep_u.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_unicode_literals_import.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\__pycache__\fix_xrange_with_import.cpython-37.pyc
|
||||||
|
..\libfuturize\fixes\fix_UserDict.py
|
||||||
|
..\libfuturize\fixes\fix_absolute_import.py
|
||||||
|
..\libfuturize\fixes\fix_add__future__imports_except_unicode_literals.py
|
||||||
|
..\libfuturize\fixes\fix_basestring.py
|
||||||
|
..\libfuturize\fixes\fix_bytes.py
|
||||||
|
..\libfuturize\fixes\fix_cmp.py
|
||||||
|
..\libfuturize\fixes\fix_division.py
|
||||||
|
..\libfuturize\fixes\fix_division_safe.py
|
||||||
|
..\libfuturize\fixes\fix_execfile.py
|
||||||
|
..\libfuturize\fixes\fix_future_builtins.py
|
||||||
|
..\libfuturize\fixes\fix_future_standard_library.py
|
||||||
|
..\libfuturize\fixes\fix_future_standard_library_urllib.py
|
||||||
|
..\libfuturize\fixes\fix_input.py
|
||||||
|
..\libfuturize\fixes\fix_metaclass.py
|
||||||
|
..\libfuturize\fixes\fix_next_call.py
|
||||||
|
..\libfuturize\fixes\fix_object.py
|
||||||
|
..\libfuturize\fixes\fix_oldstr_wrap.py
|
||||||
|
..\libfuturize\fixes\fix_order___future__imports.py
|
||||||
|
..\libfuturize\fixes\fix_print.py
|
||||||
|
..\libfuturize\fixes\fix_print_with_import.py
|
||||||
|
..\libfuturize\fixes\fix_raise.py
|
||||||
|
..\libfuturize\fixes\fix_remove_old__future__imports.py
|
||||||
|
..\libfuturize\fixes\fix_unicode_keep_u.py
|
||||||
|
..\libfuturize\fixes\fix_unicode_literals_import.py
|
||||||
|
..\libfuturize\fixes\fix_xrange_with_import.py
|
||||||
|
..\libfuturize\main.py
|
||||||
|
..\libpasteurize\__init__.py
|
||||||
|
..\libpasteurize\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\libpasteurize\__pycache__\main.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__init__.py
|
||||||
|
..\libpasteurize\fixes\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\feature_base.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_add_all__future__imports.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_add_all_future_builtins.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_add_future_standard_library_import.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_annotations.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_division.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_features.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_fullargspec.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_future_builtins.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_getcwd.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_imports.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_imports2.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_kwargs.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_memoryview.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_metaclass.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_newstyle.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_next.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_printfunction.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_raise.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_raise_.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_throw.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\__pycache__\fix_unpacking.cpython-37.pyc
|
||||||
|
..\libpasteurize\fixes\feature_base.py
|
||||||
|
..\libpasteurize\fixes\fix_add_all__future__imports.py
|
||||||
|
..\libpasteurize\fixes\fix_add_all_future_builtins.py
|
||||||
|
..\libpasteurize\fixes\fix_add_future_standard_library_import.py
|
||||||
|
..\libpasteurize\fixes\fix_annotations.py
|
||||||
|
..\libpasteurize\fixes\fix_division.py
|
||||||
|
..\libpasteurize\fixes\fix_features.py
|
||||||
|
..\libpasteurize\fixes\fix_fullargspec.py
|
||||||
|
..\libpasteurize\fixes\fix_future_builtins.py
|
||||||
|
..\libpasteurize\fixes\fix_getcwd.py
|
||||||
|
..\libpasteurize\fixes\fix_imports.py
|
||||||
|
..\libpasteurize\fixes\fix_imports2.py
|
||||||
|
..\libpasteurize\fixes\fix_kwargs.py
|
||||||
|
..\libpasteurize\fixes\fix_memoryview.py
|
||||||
|
..\libpasteurize\fixes\fix_metaclass.py
|
||||||
|
..\libpasteurize\fixes\fix_newstyle.py
|
||||||
|
..\libpasteurize\fixes\fix_next.py
|
||||||
|
..\libpasteurize\fixes\fix_printfunction.py
|
||||||
|
..\libpasteurize\fixes\fix_raise.py
|
||||||
|
..\libpasteurize\fixes\fix_raise_.py
|
||||||
|
..\libpasteurize\fixes\fix_throw.py
|
||||||
|
..\libpasteurize\fixes\fix_unpacking.py
|
||||||
|
..\libpasteurize\main.py
|
||||||
|
..\past\__init__.py
|
||||||
|
..\past\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\past\builtins\__init__.py
|
||||||
|
..\past\builtins\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\past\builtins\__pycache__\misc.cpython-37.pyc
|
||||||
|
..\past\builtins\__pycache__\noniterators.cpython-37.pyc
|
||||||
|
..\past\builtins\misc.py
|
||||||
|
..\past\builtins\noniterators.py
|
||||||
|
..\past\translation\__init__.py
|
||||||
|
..\past\translation\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\past\types\__init__.py
|
||||||
|
..\past\types\__pycache__\__init__.cpython-37.pyc
|
||||||
|
..\past\types\__pycache__\basestring.cpython-37.pyc
|
||||||
|
..\past\types\__pycache__\olddict.cpython-37.pyc
|
||||||
|
..\past\types\__pycache__\oldstr.cpython-37.pyc
|
||||||
|
..\past\types\basestring.py
|
||||||
|
..\past\types\olddict.py
|
||||||
|
..\past\types\oldstr.py
|
||||||
|
..\past\utils\__init__.py
|
||||||
|
..\past\utils\__pycache__\__init__.cpython-37.pyc
|
||||||
|
PKG-INFO
|
||||||
|
SOURCES.txt
|
||||||
|
dependency_links.txt
|
||||||
|
entry_points.txt
|
||||||
|
top_level.txt
|
@ -0,0 +1,4 @@
|
|||||||
|
future
|
||||||
|
libfuturize
|
||||||
|
libpasteurize
|
||||||
|
past
|
93
venv/Lib/site-packages/future/__init__.py
Normal file
93
venv/Lib/site-packages/future/__init__.py
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
"""
|
||||||
|
future: Easy, safe support for Python 2/3 compatibility
|
||||||
|
=======================================================
|
||||||
|
|
||||||
|
``future`` is the missing compatibility layer between Python 2 and Python
|
||||||
|
3. It allows you to use a single, clean Python 3.x-compatible codebase to
|
||||||
|
support both Python 2 and Python 3 with minimal overhead.
|
||||||
|
|
||||||
|
It is designed to be used as follows::
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division,
|
||||||
|
print_function, unicode_literals)
|
||||||
|
from builtins import (
|
||||||
|
bytes, dict, int, list, object, range, str,
|
||||||
|
ascii, chr, hex, input, next, oct, open,
|
||||||
|
pow, round, super,
|
||||||
|
filter, map, zip)
|
||||||
|
|
||||||
|
followed by predominantly standard, idiomatic Python 3 code that then runs
|
||||||
|
similarly on Python 2.6/2.7 and Python 3.3+.
|
||||||
|
|
||||||
|
The imports have no effect on Python 3. On Python 2, they shadow the
|
||||||
|
corresponding builtins, which normally have different semantics on Python 3
|
||||||
|
versus 2, to provide their Python 3 semantics.
|
||||||
|
|
||||||
|
|
||||||
|
Standard library reorganization
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
``future`` supports the standard library reorganization (PEP 3108) through the
|
||||||
|
following Py3 interfaces:
|
||||||
|
|
||||||
|
>>> # Top-level packages with Py3 names provided on Py2:
|
||||||
|
>>> import html.parser
|
||||||
|
>>> import queue
|
||||||
|
>>> import tkinter.dialog
|
||||||
|
>>> import xmlrpc.client
|
||||||
|
>>> # etc.
|
||||||
|
|
||||||
|
>>> # Aliases provided for extensions to existing Py2 module names:
|
||||||
|
>>> from future.standard_library import install_aliases
|
||||||
|
>>> install_aliases()
|
||||||
|
|
||||||
|
>>> from collections import Counter, OrderedDict # backported to Py2.6
|
||||||
|
>>> from collections import UserDict, UserList, UserString
|
||||||
|
>>> import urllib.request
|
||||||
|
>>> from itertools import filterfalse, zip_longest
|
||||||
|
>>> from subprocess import getoutput, getstatusoutput
|
||||||
|
|
||||||
|
|
||||||
|
Automatic conversion
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
An included script called `futurize
|
||||||
|
<http://python-future.org/automatic_conversion.html>`_ aids in converting
|
||||||
|
code (from either Python 2 or Python 3) to code compatible with both
|
||||||
|
platforms. It is similar to ``python-modernize`` but goes further in
|
||||||
|
providing Python 3 compatibility through the use of the backported types
|
||||||
|
and builtin functions in ``future``.
|
||||||
|
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
-------------
|
||||||
|
|
||||||
|
See: http://python-future.org
|
||||||
|
|
||||||
|
|
||||||
|
Credits
|
||||||
|
-------
|
||||||
|
|
||||||
|
:Author: Ed Schofield, Jordan M. Adler, et al
|
||||||
|
:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
|
||||||
|
Ltd, Singapore. http://pythoncharmers.com
|
||||||
|
:Others: See docs/credits.rst or http://python-future.org/credits.html
|
||||||
|
|
||||||
|
|
||||||
|
Licensing
|
||||||
|
---------
|
||||||
|
Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
|
||||||
|
The software is distributed under an MIT licence. See LICENSE.txt.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
__title__ = 'future'
|
||||||
|
__author__ = 'Ed Schofield'
|
||||||
|
__license__ = 'MIT'
|
||||||
|
__copyright__ = 'Copyright 2013-2019 Python Charmers Pty Ltd'
|
||||||
|
__ver_major__ = 0
|
||||||
|
__ver_minor__ = 18
|
||||||
|
__ver_patch__ = 2
|
||||||
|
__ver_sub__ = ''
|
||||||
|
__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
|
||||||
|
__ver_patch__, __ver_sub__)
|
26
venv/Lib/site-packages/future/backports/__init__.py
Normal file
26
venv/Lib/site-packages/future/backports/__init__.py
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
"""
|
||||||
|
future.backports package
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__future_module__ = True
|
||||||
|
from future.standard_library import import_top_level_modules
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
import_top_level_modules()
|
||||||
|
|
||||||
|
|
||||||
|
from .misc import (ceil,
|
||||||
|
OrderedDict,
|
||||||
|
Counter,
|
||||||
|
ChainMap,
|
||||||
|
check_output,
|
||||||
|
count,
|
||||||
|
recursive_repr,
|
||||||
|
_count_elements,
|
||||||
|
cmp_to_key
|
||||||
|
)
|
422
venv/Lib/site-packages/future/backports/_markupbase.py
Normal file
422
venv/Lib/site-packages/future/backports/_markupbase.py
Normal file
@ -0,0 +1,422 @@
|
|||||||
|
"""Shared support for scanning document type declarations in HTML and XHTML.
|
||||||
|
|
||||||
|
Backported for python-future from Python 3.3. Reason: ParserBase is an
|
||||||
|
old-style class in the Python 2.7 source of markupbase.py, which I suspect
|
||||||
|
might be the cause of sporadic unit-test failures on travis-ci.org with
|
||||||
|
test_htmlparser.py. The test failures look like this:
|
||||||
|
|
||||||
|
======================================================================
|
||||||
|
|
||||||
|
ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase)
|
||||||
|
|
||||||
|
----------------------------------------------------------------------
|
||||||
|
|
||||||
|
Traceback (most recent call last):
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement
|
||||||
|
[("starttag", "a", [("b", "&><\"'")])])
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check
|
||||||
|
collector = self.get_collector()
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector
|
||||||
|
return EventCollector(strict=True)
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__
|
||||||
|
html.parser.HTMLParser.__init__(self, *args, **kw)
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__
|
||||||
|
self.reset()
|
||||||
|
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset
|
||||||
|
_markupbase.ParserBase.reset(self)
|
||||||
|
|
||||||
|
TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead)
|
||||||
|
|
||||||
|
This module is used as a foundation for the html.parser module. It has no
|
||||||
|
documented public API and should not be used directly.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
|
||||||
|
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
|
||||||
|
_commentclose = re.compile(r'--\s*>')
|
||||||
|
_markedsectionclose = re.compile(r']\s*]\s*>')
|
||||||
|
|
||||||
|
# An analysis of the MS-Word extensions is available at
|
||||||
|
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
|
||||||
|
|
||||||
|
_msmarkedsectionclose = re.compile(r']\s*>')
|
||||||
|
|
||||||
|
del re
|
||||||
|
|
||||||
|
|
||||||
|
class ParserBase(object):
|
||||||
|
"""Parser base class which provides some common support methods used
|
||||||
|
by the SGML/HTML and XHTML parsers."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
if self.__class__ is ParserBase:
|
||||||
|
raise RuntimeError(
|
||||||
|
"_markupbase.ParserBase must be subclassed")
|
||||||
|
|
||||||
|
def error(self, message):
|
||||||
|
raise NotImplementedError(
|
||||||
|
"subclasses of ParserBase must override error()")
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
self.lineno = 1
|
||||||
|
self.offset = 0
|
||||||
|
|
||||||
|
def getpos(self):
|
||||||
|
"""Return current line number and offset."""
|
||||||
|
return self.lineno, self.offset
|
||||||
|
|
||||||
|
# Internal -- update line number and offset. This should be
|
||||||
|
# called for each piece of data exactly once, in order -- in other
|
||||||
|
# words the concatenation of all the input strings to this
|
||||||
|
# function should be exactly the entire input.
|
||||||
|
def updatepos(self, i, j):
|
||||||
|
if i >= j:
|
||||||
|
return j
|
||||||
|
rawdata = self.rawdata
|
||||||
|
nlines = rawdata.count("\n", i, j)
|
||||||
|
if nlines:
|
||||||
|
self.lineno = self.lineno + nlines
|
||||||
|
pos = rawdata.rindex("\n", i, j) # Should not fail
|
||||||
|
self.offset = j-(pos+1)
|
||||||
|
else:
|
||||||
|
self.offset = self.offset + j-i
|
||||||
|
return j
|
||||||
|
|
||||||
|
_decl_otherchars = ''
|
||||||
|
|
||||||
|
# Internal -- parse declaration (for use by subclasses).
|
||||||
|
def parse_declaration(self, i):
|
||||||
|
# This is some sort of declaration; in "HTML as
|
||||||
|
# deployed," this should only be the document type
|
||||||
|
# declaration ("<!DOCTYPE html...>").
|
||||||
|
# ISO 8879:1986, however, has more complex
|
||||||
|
# declaration syntax for elements in <!...>, including:
|
||||||
|
# --comment--
|
||||||
|
# [marked section]
|
||||||
|
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
|
||||||
|
# ATTLIST, NOTATION, SHORTREF, USEMAP,
|
||||||
|
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
|
||||||
|
rawdata = self.rawdata
|
||||||
|
j = i + 2
|
||||||
|
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
|
||||||
|
if rawdata[j:j+1] == ">":
|
||||||
|
# the empty comment <!>
|
||||||
|
return j + 1
|
||||||
|
if rawdata[j:j+1] in ("-", ""):
|
||||||
|
# Start of comment followed by buffer boundary,
|
||||||
|
# or just a buffer boundary.
|
||||||
|
return -1
|
||||||
|
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
|
||||||
|
n = len(rawdata)
|
||||||
|
if rawdata[j:j+2] == '--': #comment
|
||||||
|
# Locate --.*-- as the body of the comment
|
||||||
|
return self.parse_comment(i)
|
||||||
|
elif rawdata[j] == '[': #marked section
|
||||||
|
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
|
||||||
|
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
|
||||||
|
# Note that this is extended by Microsoft Office "Save as Web" function
|
||||||
|
# to include [if...] and [endif].
|
||||||
|
return self.parse_marked_section(i)
|
||||||
|
else: #all other declaration elements
|
||||||
|
decltype, j = self._scan_name(j, i)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
if decltype == "doctype":
|
||||||
|
self._decl_otherchars = ''
|
||||||
|
while j < n:
|
||||||
|
c = rawdata[j]
|
||||||
|
if c == ">":
|
||||||
|
# end of declaration syntax
|
||||||
|
data = rawdata[i+2:j]
|
||||||
|
if decltype == "doctype":
|
||||||
|
self.handle_decl(data)
|
||||||
|
else:
|
||||||
|
# According to the HTML5 specs sections "8.2.4.44 Bogus
|
||||||
|
# comment state" and "8.2.4.45 Markup declaration open
|
||||||
|
# state", a comment token should be emitted.
|
||||||
|
# Calling unknown_decl provides more flexibility though.
|
||||||
|
self.unknown_decl(data)
|
||||||
|
return j + 1
|
||||||
|
if c in "\"'":
|
||||||
|
m = _declstringlit_match(rawdata, j)
|
||||||
|
if not m:
|
||||||
|
return -1 # incomplete
|
||||||
|
j = m.end()
|
||||||
|
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
|
||||||
|
name, j = self._scan_name(j, i)
|
||||||
|
elif c in self._decl_otherchars:
|
||||||
|
j = j + 1
|
||||||
|
elif c == "[":
|
||||||
|
# this could be handled in a separate doctype parser
|
||||||
|
if decltype == "doctype":
|
||||||
|
j = self._parse_doctype_subset(j + 1, i)
|
||||||
|
elif decltype in set(["attlist", "linktype", "link", "element"]):
|
||||||
|
# must tolerate []'d groups in a content model in an element declaration
|
||||||
|
# also in data attribute specifications of attlist declaration
|
||||||
|
# also link type declaration subsets in linktype declarations
|
||||||
|
# also link attribute specification lists in link declarations
|
||||||
|
self.error("unsupported '[' char in %s declaration" % decltype)
|
||||||
|
else:
|
||||||
|
self.error("unexpected '[' char in declaration")
|
||||||
|
else:
|
||||||
|
self.error(
|
||||||
|
"unexpected %r char in declaration" % rawdata[j])
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
return -1 # incomplete
|
||||||
|
|
||||||
|
# Internal -- parse a marked section
|
||||||
|
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
|
||||||
|
def parse_marked_section(self, i, report=1):
|
||||||
|
rawdata= self.rawdata
|
||||||
|
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
|
||||||
|
sectName, j = self._scan_name( i+3, i )
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]):
|
||||||
|
# look for standard ]]> ending
|
||||||
|
match= _markedsectionclose.search(rawdata, i+3)
|
||||||
|
elif sectName in set(["if", "else", "endif"]):
|
||||||
|
# look for MS Office ]> ending
|
||||||
|
match= _msmarkedsectionclose.search(rawdata, i+3)
|
||||||
|
else:
|
||||||
|
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
|
||||||
|
if not match:
|
||||||
|
return -1
|
||||||
|
if report:
|
||||||
|
j = match.start(0)
|
||||||
|
self.unknown_decl(rawdata[i+3: j])
|
||||||
|
return match.end(0)
|
||||||
|
|
||||||
|
# Internal -- parse comment, return length or -1 if not terminated
|
||||||
|
def parse_comment(self, i, report=1):
|
||||||
|
rawdata = self.rawdata
|
||||||
|
if rawdata[i:i+4] != '<!--':
|
||||||
|
self.error('unexpected call to parse_comment()')
|
||||||
|
match = _commentclose.search(rawdata, i+4)
|
||||||
|
if not match:
|
||||||
|
return -1
|
||||||
|
if report:
|
||||||
|
j = match.start(0)
|
||||||
|
self.handle_comment(rawdata[i+4: j])
|
||||||
|
return match.end(0)
|
||||||
|
|
||||||
|
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
|
||||||
|
# returning the index just past any whitespace following the trailing ']'.
|
||||||
|
def _parse_doctype_subset(self, i, declstartpos):
|
||||||
|
rawdata = self.rawdata
|
||||||
|
n = len(rawdata)
|
||||||
|
j = i
|
||||||
|
while j < n:
|
||||||
|
c = rawdata[j]
|
||||||
|
if c == "<":
|
||||||
|
s = rawdata[j:j+2]
|
||||||
|
if s == "<":
|
||||||
|
# end of buffer; incomplete
|
||||||
|
return -1
|
||||||
|
if s != "<!":
|
||||||
|
self.updatepos(declstartpos, j + 1)
|
||||||
|
self.error("unexpected char in internal subset (in %r)" % s)
|
||||||
|
if (j + 2) == n:
|
||||||
|
# end of buffer; incomplete
|
||||||
|
return -1
|
||||||
|
if (j + 4) > n:
|
||||||
|
# end of buffer; incomplete
|
||||||
|
return -1
|
||||||
|
if rawdata[j:j+4] == "<!--":
|
||||||
|
j = self.parse_comment(j, report=0)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
continue
|
||||||
|
name, j = self._scan_name(j + 2, declstartpos)
|
||||||
|
if j == -1:
|
||||||
|
return -1
|
||||||
|
if name not in set(["attlist", "element", "entity", "notation"]):
|
||||||
|
self.updatepos(declstartpos, j + 2)
|
||||||
|
self.error(
|
||||||
|
"unknown declaration %r in internal subset" % name)
|
||||||
|
# handle the individual names
|
||||||
|
meth = getattr(self, "_parse_doctype_" + name)
|
||||||
|
j = meth(j, declstartpos)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
elif c == "%":
|
||||||
|
# parameter entity reference
|
||||||
|
if (j + 1) == n:
|
||||||
|
# end of buffer; incomplete
|
||||||
|
return -1
|
||||||
|
s, j = self._scan_name(j + 1, declstartpos)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
if rawdata[j] == ";":
|
||||||
|
j = j + 1
|
||||||
|
elif c == "]":
|
||||||
|
j = j + 1
|
||||||
|
while j < n and rawdata[j].isspace():
|
||||||
|
j = j + 1
|
||||||
|
if j < n:
|
||||||
|
if rawdata[j] == ">":
|
||||||
|
return j
|
||||||
|
self.updatepos(declstartpos, j)
|
||||||
|
self.error("unexpected char after internal subset")
|
||||||
|
else:
|
||||||
|
return -1
|
||||||
|
elif c.isspace():
|
||||||
|
j = j + 1
|
||||||
|
else:
|
||||||
|
self.updatepos(declstartpos, j)
|
||||||
|
self.error("unexpected char %r in internal subset" % c)
|
||||||
|
# end of buffer reached
|
||||||
|
return -1
|
||||||
|
|
||||||
|
# Internal -- scan past <!ELEMENT declarations
|
||||||
|
def _parse_doctype_element(self, i, declstartpos):
|
||||||
|
name, j = self._scan_name(i, declstartpos)
|
||||||
|
if j == -1:
|
||||||
|
return -1
|
||||||
|
# style content model; just skip until '>'
|
||||||
|
rawdata = self.rawdata
|
||||||
|
if '>' in rawdata[j:]:
|
||||||
|
return rawdata.find(">", j) + 1
|
||||||
|
return -1
|
||||||
|
|
||||||
|
# Internal -- scan past <!ATTLIST declarations
|
||||||
|
def _parse_doctype_attlist(self, i, declstartpos):
|
||||||
|
rawdata = self.rawdata
|
||||||
|
name, j = self._scan_name(i, declstartpos)
|
||||||
|
c = rawdata[j:j+1]
|
||||||
|
if c == "":
|
||||||
|
return -1
|
||||||
|
if c == ">":
|
||||||
|
return j + 1
|
||||||
|
while 1:
|
||||||
|
# scan a series of attribute descriptions; simplified:
|
||||||
|
# name type [value] [#constraint]
|
||||||
|
name, j = self._scan_name(j, declstartpos)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
c = rawdata[j:j+1]
|
||||||
|
if c == "":
|
||||||
|
return -1
|
||||||
|
if c == "(":
|
||||||
|
# an enumerated type; look for ')'
|
||||||
|
if ")" in rawdata[j:]:
|
||||||
|
j = rawdata.find(")", j) + 1
|
||||||
|
else:
|
||||||
|
return -1
|
||||||
|
while rawdata[j:j+1].isspace():
|
||||||
|
j = j + 1
|
||||||
|
if not rawdata[j:]:
|
||||||
|
# end of buffer, incomplete
|
||||||
|
return -1
|
||||||
|
else:
|
||||||
|
name, j = self._scan_name(j, declstartpos)
|
||||||
|
c = rawdata[j:j+1]
|
||||||
|
if not c:
|
||||||
|
return -1
|
||||||
|
if c in "'\"":
|
||||||
|
m = _declstringlit_match(rawdata, j)
|
||||||
|
if m:
|
||||||
|
j = m.end()
|
||||||
|
else:
|
||||||
|
return -1
|
||||||
|
c = rawdata[j:j+1]
|
||||||
|
if not c:
|
||||||
|
return -1
|
||||||
|
if c == "#":
|
||||||
|
if rawdata[j:] == "#":
|
||||||
|
# end of buffer
|
||||||
|
return -1
|
||||||
|
name, j = self._scan_name(j + 1, declstartpos)
|
||||||
|
if j < 0:
|
||||||
|
return j
|
||||||
|
c = rawdata[j:j+1]
|
||||||
|
if not c:
|
||||||
|
return -1
|
||||||
|
if c == '>':
|
||||||
|
# all done
|
||||||
|
return j + 1
|
||||||
|
|
||||||
|
    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        """Skip over a <!NOTATION ...> declaration starting at index *i*.

        Returns the index just past the closing '>', or -1 if the buffer
        ends before the declaration is complete.
        """
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                # A quoted public/system identifier.
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j
|
||||||
|
|
||||||
|
    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        """Skip over an <!ENTITY ...> declaration starting at index *i*.

        Returns the index just past the closing '>', or -1 if the buffer
        ends before the declaration is complete.
        """
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # A parameter entity: skip the '%' and any following whitespace.
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                # A quoted entity value / identifier.
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1    # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j
|
||||||
|
|
||||||
|
    # Internal -- scan a name token; return (name, new-position), or
    # (None, -1) if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            # Not a name token at all: report a parse error at this position.
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])
|
||||||
|
|
||||||
|
# To be overridden -- handlers for unknown objects
|
||||||
|
def unknown_decl(self, data):
|
||||||
|
pass
|
2152
venv/Lib/site-packages/future/backports/datetime.py
Normal file
2152
venv/Lib/site-packages/future/backports/datetime.py
Normal file
File diff suppressed because it is too large
Load Diff
78
venv/Lib/site-packages/future/backports/email/__init__.py
Normal file
78
venv/Lib/site-packages/future/backports/email/__init__.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
# Copyright (C) 2001-2007 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""
|
||||||
|
Backport of the Python 3.3 email package for Python-Future.
|
||||||
|
|
||||||
|
A package for parsing, handling, and generating email messages.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
# Install the surrogate escape handler here because this is used by many
# modules in the email package.
from future.utils import surrogateescape
surrogateescape.register_surrogateescape()
# (Should this be done globally by ``future``?)


__version__ = '5.1.0'


# Public names re-exported by ``from ...email import *``; the
# message_from_* convenience functions are defined below in this module.
__all__ = [
    'base64mime',
    'charset',
    'encoders',
    'errors',
    'feedparser',
    'generator',
    'header',
    'iterators',
    'message',
    'message_from_file',
    'message_from_binary_file',
    'message_from_string',
    'message_from_bytes',
    'mime',
    'parser',
    'quoprimime',
    'utils',
    ]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Some convenience routines.  Don't import Parser and Message as side-effects
# of importing email since those cascadingly import most of the rest of the
# email package.
def message_from_string(s, *args, **kws):
    """Parse a string into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parsestr(s)
|
||||||
|
|
||||||
|
def message_from_bytes(s, *args, **kws):
    """Parse a bytes string into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parsebytes(s)
|
||||||
|
|
||||||
|
def message_from_file(fp, *args, **kws):
    """Read a file and parse its contents into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parse(fp)
|
||||||
|
|
||||||
|
def message_from_binary_file(fp, *args, **kws):
    """Read a binary file and parse its contents into a Message object model.

    Optional _class and strict are passed to the Parser constructor.
    """
    from future.backports.email.parser import BytesParser
    parser = BytesParser(*args, **kws)
    return parser.parse(fp)
|
232
venv/Lib/site-packages/future/backports/email/_encoded_words.py
Normal file
232
venv/Lib/site-packages/future/backports/email/_encoded_words.py
Normal file
@ -0,0 +1,232 @@
|
|||||||
|
""" Routines for manipulating RFC2047 encoded words.
|
||||||
|
|
||||||
|
This is currently a package-private API, but will be considered for promotion
|
||||||
|
to a public API if there is demand.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import bytes
|
||||||
|
from future.builtins import chr
|
||||||
|
from future.builtins import int
|
||||||
|
from future.builtins import str
|
||||||
|
|
||||||
|
# An ecoded word looks like this:
|
||||||
|
#
|
||||||
|
# =?charset[*lang]?cte?encoded_string?=
|
||||||
|
#
|
||||||
|
# for more information about charset see the charset module. Here it is one
|
||||||
|
# of the preferred MIME charset names (hopefully; you never know when parsing).
|
||||||
|
# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case). In
|
||||||
|
# theory other letters could be used for other encodings, but in practice this
|
||||||
|
# (almost?) never happens. There could be a public API for adding entries
|
||||||
|
# to the CTE tables, but YAGNI for now. 'q' is Quoted Printable, 'b' is
|
||||||
|
# Base64. The meaning of encoded_string should be obvious. 'lang' is optional
|
||||||
|
# as indicated by the brackets (they are not part of the syntax) but is almost
|
||||||
|
# never encountered in practice.
|
||||||
|
#
|
||||||
|
# The general interface for a CTE decoder is that it takes the encoded_string
|
||||||
|
# as its argument, and returns a tuple (cte_decoded_string, defects). The
|
||||||
|
# cte_decoded_string is the original binary that was encoded using the
|
||||||
|
# specified cte. 'defects' is a list of MessageDefect instances indicating any
|
||||||
|
# problems encountered during conversion. 'charset' and 'lang' are the
|
||||||
|
# corresponding strings extracted from the EW, case preserved.
|
||||||
|
#
|
||||||
|
# The general interface for a CTE encoder is that it takes a binary sequence
|
||||||
|
# as input and returns the cte_encoded_string, which is an ascii-only string.
|
||||||
|
#
|
||||||
|
# Each decoder must also supply a length function that takes the binary
|
||||||
|
# sequence as its argument and returns the length of the resulting encoded
|
||||||
|
# string.
|
||||||
|
#
|
||||||
|
# The main API functions for the module are decode, which calls the decoder
|
||||||
|
# referenced by the cte specifier, and encode, which adds the appropriate
|
||||||
|
# RFC 2047 "chrome" to the encoded string, and can optionally automatically
|
||||||
|
# select the shortest possible encoding. See their docstrings below for
|
||||||
|
# details.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import base64
|
||||||
|
import binascii
|
||||||
|
import functools
|
||||||
|
from string import ascii_letters, digits
|
||||||
|
from future.backports.email import errors
|
||||||
|
|
||||||
|
# Public API of this module: per-CTE codecs, length predictors, and the
# top-level encoded-word decode/encode entry points.
__all__ = ['decode_q',
           'encode_q',
           'decode_b',
           'encode_b',
           'len_q',
           'len_b',
           'decode',
           'encode',
           ]
|
||||||
|
|
||||||
|
#
|
||||||
|
# Quoted Printable
|
||||||
|
#
|
||||||
|
|
||||||
|
# Regex-based quoted-printable decoder: every "=XX" escape is replaced by
# the single byte with hexadecimal value XX.
_q_byte_subber = functools.partial(
    re.compile(br'=([a-fA-F0-9]{2})').sub,
    lambda match: bytes([int(match.group(1), 16)]))


def decode_q(encoded):
    """Decode a Q-encoded byte string; return ``(decoded_bytes, defects)``.

    The defects list is always empty: Q decoding cannot fail.
    """
    with_spaces = bytes(encoded.replace(b'_', b' '))
    return _q_byte_subber(with_spaces), []
|
||||||
|
|
||||||
|
|
||||||
|
# Lazily-populated mapping from byte value to its Q-encoded text form.
class _QByteMap(dict):

    # Byte values that may appear literally in a Q-encoded word.
    safe = bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'))

    def __missing__(self, key):
        encoded = chr(key) if key in self.safe else "={:02X}".format(key)
        self[key] = encoded
        return encoded

_q_byte_map = _QByteMap()

# RFC 2047: a space in a header is written as '_' in the Q encoding.
_q_byte_map[ord(' ')] = '_'


def encode_q(bstring):
    """Q-encode *bstring* and return the result as a native string."""
    return str(''.join(_q_byte_map[octet] for octet in bytes(bstring)))


def len_q(bstring):
    """Return the length *bstring* would have once Q-encoded."""
    return sum(len(_q_byte_map[octet]) for octet in bytes(bstring))
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Base64
|
||||||
|
#
|
||||||
|
|
||||||
|
def decode_b(encoded):
    """Base64-decode *encoded* and return ``(decoded_bytes, defects)``.

    Malformed input does not raise: bad padding or non-alphabet characters
    are recorded as defect objects in the returned list and a best-effort
    decode is attempted.
    """
    defects = []
    pad_err = len(encoded) % 4
    if pad_err:
        # Wrong padding length: note the defect, then repair before decoding.
        defects.append(errors.InvalidBase64PaddingDefect())
        padded_encoded = encoded + b'==='[:4-pad_err]
    else:
        padded_encoded = encoded
    try:
        # The validate kwarg to b64decode is not supported in Py2.x
        if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', padded_encoded):
            raise binascii.Error('Non-base64 digit found')
        return base64.b64decode(padded_encoded), defects
    except binascii.Error:
        # Since we had correct padding, this must be an invalid-char error.
        defects = [errors.InvalidBase64CharactersDefect()]
        # The non-alphabet characters are ignored as far as padding
        # goes, but we don't know how many there are.  So we'll just
        # try various padding lengths until something works.
        for i in 0, 1, 2, 3:
            try:
                return base64.b64decode(encoded+b'='*i), defects
            except (binascii.Error, TypeError):    # Py2 raises a TypeError
                if i==0:
                    defects.append(errors.InvalidBase64PaddingDefect())
        else:
            # This should never happen.
            raise AssertionError("unexpected binascii.Error")
|
||||||
|
|
||||||
|
def encode_b(bstring):
    """Base64-encode *bstring* and return the result as an ASCII string."""
    encoded = base64.b64encode(bstring)
    return encoded.decode('ascii')
|
||||||
|
|
||||||
|
def len_b(bstring):
    """Return the length *bstring* would have once base64-encoded."""
    full_groups, remainder = divmod(len(bstring), 3)
    # Every 3 input bytes -- or a final partial group -- emit 4 characters.
    if remainder:
        full_groups += 1
    return full_groups * 4
|
||||||
|
|
||||||
|
|
||||||
|
# Dispatch table mapping a CTE specifier ('q' or 'b') to its decoder.
_cte_decoders = {
    'q': decode_q,
    'b': decode_b,
    }
|
||||||
|
|
||||||
|
def decode(ew):
    """Decode encoded word and return (string, charset, lang, defects) tuple.

    An RFC 2047/2243 encoded word has the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' may be omitted but the other parts may not be.

    This function expects exactly such a string (that is, it does not check the
    syntax and may raise errors if the string is not well formed), and returns
    the encoded_string decoded first from its Content Transfer Encoding and
    then from the resulting bytes into unicode using the specified charset.  If
    the cte-decoded string does not successfully decode using the specified
    character set, a defect is added to the defects list and the unknown octets
    are replaced by the unicode 'unknown' character \uFDFF.

    The specified charset and language are returned.  The default for language,
    which is rarely if ever encountered, is the empty string.

    """
    # Split the '?'-delimited fields; the leading and trailing '=' pieces
    # are discarded.
    _, charset, cte, cte_string, _ = str(ew).split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        # Bytes not valid in the declared charset: record a defect and keep
        # the raw octets via surrogateescape.
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        # The charset itself is unknown; fall back to ascii.
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects
|
||||||
|
|
||||||
|
|
||||||
|
# Dispatch tables mapping a CTE specifier to its encoder and to the
# function that predicts the encoded length without actually encoding.
_cte_encoders = {
    'q': encode_q,
    'b': encode_b,
    }

_cte_encode_length = {
    'q': len_q,
    'b': len_b,
    }
|
||||||
|
|
||||||
|
def encode(string, charset='utf-8', encoding=None, lang=''):
    """Encode string using the CTE encoding that produces the shorter result.

    Produces an RFC 2047/2243 encoded word of the form:

        =?charset*lang?cte?encoded_string?=

    where '*lang' is omitted unless the 'lang' parameter is given a value.
    Optional argument charset (defaults to utf-8) specifies the charset to use
    to encode the string to binary before CTE encoding it.  Optional argument
    'encoding' is the cte specifier for the encoding that should be used ('q'
    or 'b'); if it is None (the default) the encoding which produces the
    shortest encoded sequence is used, except that 'q' is preferred if it is up
    to five characters longer.  Optional argument 'lang' (default '') gives the
    RFC 2243 language string to specify in the encoded word.

    """
    text = str(string)
    if charset == 'unknown-8bit':
        bstring = text.encode('ascii', 'surrogateescape')
    else:
        bstring = text.encode(charset)
    if encoding is None:
        qlen = _cte_encode_length['q'](bstring)
        blen = _cte_encode_length['b'](bstring)
        # Bias toward 'q': choose it unless it is 5+ chars longer than 'b'.
        encoding = 'q' if qlen - blen < 5 else 'b'
    encoded = _cte_encoders[encoding](bstring)
    lang_part = '*' + lang if lang else ''
    return "=?{0}{1}?{2}?{3}?=".format(charset, lang_part, encoding, encoded)
|
File diff suppressed because it is too large
Load Diff
546
venv/Lib/site-packages/future/backports/email/_parseaddr.py
Normal file
546
venv/Lib/site-packages/future/backports/email/_parseaddr.py
Normal file
@ -0,0 +1,546 @@
|
|||||||
|
# Copyright (C) 2002-2007 Python Software Foundation
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Email address parsing code.
|
||||||
|
|
||||||
|
Lifted directly from rfc822.py. This should eventually be rewritten.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import int
|
||||||
|
|
||||||
|
__all__ = [
    'mktime_tz',
    'parsedate',
    'parsedate_tz',
    'quote',
    ]

import time, calendar

# Separator constants used when joining parsed address fragments.
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '

# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']

_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.

# Values are HHMM offsets (e.g. -400 is -04:00); they are converted to
# seconds later by _parsedate_tz.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }
|
||||||
|
|
||||||
|
|
||||||
|
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.
    """
    parsed = _parsedate_tz(data)
    if not parsed:
        return
    # A missing (declaimed) timezone is reported as offset 0 here.
    if parsed[9] is None:
        parsed[9] = 0
    return tuple(parsed)
|
||||||
|
|
||||||
|
def _parsedate_tz(data):
    """Convert date to extended time tuple.

    The last (additional) element is the time zone offset in seconds, except if
    the timezone was specified as -0000.  In that case the last element is
    None.  This indicates a UTC timestamp that explicitly declaims knowledge of
    the source timezone, as opposed to a +0000 timestamp that indicates the
    source timezone really was UTC.

    """
    if not data:
        return
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(',') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i == -1:
            i = s.find('-')
        if i > 0:
            # Time and numeric zone are fused ("12:00:00-0500"); split them.
            data[3:] = [s[:i], s[i:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Day and month may be swapped ("Feb 10" vs "10 Feb"); try the swap.
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Full month names occupy table slots 13-24; fold back onto 1-12.
        mm -= 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and timezone fields were swapped.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    elif len(tm) == 1 and '.' in tm[0]:
        # Some non-compliant MUAs use '.' to separate time elements.
        tm = tm[0].split('.')
        if len(tm) == 2:
            [thh, tmm] = tm
            tss = 0
        elif len(tm) == 3:
            [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    # Check for a yy specified in two-digit format, then convert it to the
    # appropriate four-digit format, according to the POSIX standard. RFC 822
    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
    # mandates a 4-digit yy. For more information, see the documentation for
    # the time module.
    if yy < 100:
        # The year is between 1969 and 1999 (inclusive).
        if yy > 68:
            yy += 1900
        # The year is between 2000 and 2068 (inclusive).
        else:
            yy += 2000
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
        if tzoffset==0 and tz.startswith('-'):
            # Explicit -0000: timezone knowledge is declaimed (see docstring).
            tzoffset = None
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
|
||||||
|
|
||||||
|
|
||||||
|
def parsedate(data):
    """Convert a time string to a time tuple."""
    parsed = parsedate_tz(data)
    # Drop the timezone element; on failure pass the non-tuple through.
    if not isinstance(parsed, tuple):
        return parsed
    return parsed[:9]
|
||||||
|
|
||||||
|
|
||||||
|
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Treat the tuple as UTC, then subtract the recorded offset.
    return calendar.timegm(data) - data[9]
|
||||||
|
|
||||||
|
|
||||||
|
def quote(str):
    """Prepare string to be used in a quoted string.

    Turns backslash and double quote characters into quoted pairs.  These
    are the only characters that need to be quoted inside a quoted string.
    Does not add the surrounding double quotes.
    """
    # Escape backslashes first so the quote escapes are not double-escaped.
    escaped = str.replace('\\', '\\\\')
    return escaped.replace('"', '\\"')
|
||||||
|
|
||||||
|
|
||||||
|
class AddrlistClass(object):
|
||||||
|
"""Address parser class by Ben Escoto.
|
||||||
|
|
||||||
|
To understand what this class does, it helps to have a copy of RFC 2822 in
|
||||||
|
front of you.
|
||||||
|
|
||||||
|
Note: this class interface is deprecated and may be removed in the future.
|
||||||
|
Use email.utils.AddressList instead.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, field):
|
||||||
|
"""Initialize a new instance.
|
||||||
|
|
||||||
|
`field' is an unparsed address header field, containing
|
||||||
|
one or more addresses.
|
||||||
|
"""
|
||||||
|
self.specials = '()<>@,:;.\"[]'
|
||||||
|
self.pos = 0
|
||||||
|
self.LWS = ' \t'
|
||||||
|
self.CR = '\r\n'
|
||||||
|
self.FWS = self.LWS + self.CR
|
||||||
|
self.atomends = self.specials + self.LWS + self.CR
|
||||||
|
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
|
||||||
|
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
|
||||||
|
# syntax, so allow dots in phrases.
|
||||||
|
self.phraseends = self.atomends.replace('.', '')
|
||||||
|
self.field = field
|
||||||
|
self.commentlist = []
|
||||||
|
|
||||||
|
def gotonext(self):
|
||||||
|
"""Skip white space and extract comments."""
|
||||||
|
wslist = []
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
if self.field[self.pos] in self.LWS + '\n\r':
|
||||||
|
if self.field[self.pos] not in '\n\r':
|
||||||
|
wslist.append(self.field[self.pos])
|
||||||
|
self.pos += 1
|
||||||
|
elif self.field[self.pos] == '(':
|
||||||
|
self.commentlist.append(self.getcomment())
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
return EMPTYSTRING.join(wslist)
|
||||||
|
|
||||||
|
def getaddrlist(self):
|
||||||
|
"""Parse all addresses.
|
||||||
|
|
||||||
|
Returns a list containing all of the addresses.
|
||||||
|
"""
|
||||||
|
result = []
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
ad = self.getaddress()
|
||||||
|
if ad:
|
||||||
|
result += ad
|
||||||
|
else:
|
||||||
|
result.append(('', ''))
|
||||||
|
return result
|
||||||
|
|
||||||
|
def getaddress(self):
|
||||||
|
"""Parse the next address."""
|
||||||
|
self.commentlist = []
|
||||||
|
self.gotonext()
|
||||||
|
|
||||||
|
oldpos = self.pos
|
||||||
|
oldcl = self.commentlist
|
||||||
|
plist = self.getphraselist()
|
||||||
|
|
||||||
|
self.gotonext()
|
||||||
|
returnlist = []
|
||||||
|
|
||||||
|
if self.pos >= len(self.field):
|
||||||
|
# Bad email address technically, no domain.
|
||||||
|
if plist:
|
||||||
|
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||||
|
|
||||||
|
elif self.field[self.pos] in '.@':
|
||||||
|
# email address is just an addrspec
|
||||||
|
# this isn't very efficient since we start over
|
||||||
|
self.pos = oldpos
|
||||||
|
self.commentlist = oldcl
|
||||||
|
addrspec = self.getaddrspec()
|
||||||
|
returnlist = [(SPACE.join(self.commentlist), addrspec)]
|
||||||
|
|
||||||
|
elif self.field[self.pos] == ':':
|
||||||
|
# address is a group
|
||||||
|
returnlist = []
|
||||||
|
|
||||||
|
fieldlen = len(self.field)
|
||||||
|
self.pos += 1
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
self.gotonext()
|
||||||
|
if self.pos < fieldlen and self.field[self.pos] == ';':
|
||||||
|
self.pos += 1
|
||||||
|
break
|
||||||
|
returnlist = returnlist + self.getaddress()
|
||||||
|
|
||||||
|
elif self.field[self.pos] == '<':
|
||||||
|
# Address is a phrase then a route addr
|
||||||
|
routeaddr = self.getrouteaddr()
|
||||||
|
|
||||||
|
if self.commentlist:
|
||||||
|
returnlist = [(SPACE.join(plist) + ' (' +
|
||||||
|
' '.join(self.commentlist) + ')', routeaddr)]
|
||||||
|
else:
|
||||||
|
returnlist = [(SPACE.join(plist), routeaddr)]
|
||||||
|
|
||||||
|
else:
|
||||||
|
if plist:
|
||||||
|
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||||
|
elif self.field[self.pos] in self.specials:
|
||||||
|
self.pos += 1
|
||||||
|
|
||||||
|
self.gotonext()
|
||||||
|
if self.pos < len(self.field) and self.field[self.pos] == ',':
|
||||||
|
self.pos += 1
|
||||||
|
return returnlist
|
||||||
|
|
||||||
|
def getrouteaddr(self):
|
||||||
|
"""Parse a route address (Return-path value).
|
||||||
|
|
||||||
|
This method just skips all the route stuff and returns the addrspec.
|
||||||
|
"""
|
||||||
|
if self.field[self.pos] != '<':
|
||||||
|
return
|
||||||
|
|
||||||
|
expectroute = False
|
||||||
|
self.pos += 1
|
||||||
|
self.gotonext()
|
||||||
|
adlist = ''
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
if expectroute:
|
||||||
|
self.getdomain()
|
||||||
|
expectroute = False
|
||||||
|
elif self.field[self.pos] == '>':
|
||||||
|
self.pos += 1
|
||||||
|
break
|
||||||
|
elif self.field[self.pos] == '@':
|
||||||
|
self.pos += 1
|
||||||
|
expectroute = True
|
||||||
|
elif self.field[self.pos] == ':':
|
||||||
|
self.pos += 1
|
||||||
|
else:
|
||||||
|
adlist = self.getaddrspec()
|
||||||
|
self.pos += 1
|
||||||
|
break
|
||||||
|
self.gotonext()
|
||||||
|
|
||||||
|
return adlist
|
||||||
|
|
||||||
|
def getaddrspec(self):
|
||||||
|
"""Parse an RFC 2822 addr-spec."""
|
||||||
|
aslist = []
|
||||||
|
|
||||||
|
self.gotonext()
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
preserve_ws = True
|
||||||
|
if self.field[self.pos] == '.':
|
||||||
|
if aslist and not aslist[-1].strip():
|
||||||
|
aslist.pop()
|
||||||
|
aslist.append('.')
|
||||||
|
self.pos += 1
|
||||||
|
preserve_ws = False
|
||||||
|
elif self.field[self.pos] == '"':
|
||||||
|
aslist.append('"%s"' % quote(self.getquote()))
|
||||||
|
elif self.field[self.pos] in self.atomends:
|
||||||
|
if aslist and not aslist[-1].strip():
|
||||||
|
aslist.pop()
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
aslist.append(self.getatom())
|
||||||
|
ws = self.gotonext()
|
||||||
|
if preserve_ws and ws:
|
||||||
|
aslist.append(ws)
|
||||||
|
|
||||||
|
if self.pos >= len(self.field) or self.field[self.pos] != '@':
|
||||||
|
return EMPTYSTRING.join(aslist)
|
||||||
|
|
||||||
|
aslist.append('@')
|
||||||
|
self.pos += 1
|
||||||
|
self.gotonext()
|
||||||
|
return EMPTYSTRING.join(aslist) + self.getdomain()
|
||||||
|
|
||||||
|
def getdomain(self):
|
||||||
|
"""Get the complete domain name from an address."""
|
||||||
|
sdlist = []
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
if self.field[self.pos] in self.LWS:
|
||||||
|
self.pos += 1
|
||||||
|
elif self.field[self.pos] == '(':
|
||||||
|
self.commentlist.append(self.getcomment())
|
||||||
|
elif self.field[self.pos] == '[':
|
||||||
|
sdlist.append(self.getdomainliteral())
|
||||||
|
elif self.field[self.pos] == '.':
|
||||||
|
self.pos += 1
|
||||||
|
sdlist.append('.')
|
||||||
|
elif self.field[self.pos] in self.atomends:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
sdlist.append(self.getatom())
|
||||||
|
return EMPTYSTRING.join(sdlist)
|
||||||
|
|
||||||
|
def getdelimited(self, beginchar, endchars, allowcomments=True):
|
||||||
|
"""Parse a header fragment delimited by special characters.
|
||||||
|
|
||||||
|
`beginchar' is the start character for the fragment.
|
||||||
|
If self is not looking at an instance of `beginchar' then
|
||||||
|
getdelimited returns the empty string.
|
||||||
|
|
||||||
|
`endchars' is a sequence of allowable end-delimiting characters.
|
||||||
|
Parsing stops when one of these is encountered.
|
||||||
|
|
||||||
|
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
|
||||||
|
within the parsed fragment.
|
||||||
|
"""
|
||||||
|
if self.field[self.pos] != beginchar:
|
||||||
|
return ''
|
||||||
|
|
||||||
|
slist = ['']
|
||||||
|
quote = False
|
||||||
|
self.pos += 1
|
||||||
|
while self.pos < len(self.field):
|
||||||
|
if quote:
|
||||||
|
slist.append(self.field[self.pos])
|
||||||
|
quote = False
|
||||||
|
elif self.field[self.pos] in endchars:
|
||||||
|
self.pos += 1
|
||||||
|
break
|
||||||
|
elif allowcomments and self.field[self.pos] == '(':
|
||||||
|
slist.append(self.getcomment())
|
||||||
|
continue # have already advanced pos from getcomment
|
||||||
|
elif self.field[self.pos] == '\\':
|
||||||
|
quote = True
|
||||||
|
else:
|
||||||
|
slist.append(self.field[self.pos])
|
||||||
|
self.pos += 1
|
||||||
|
|
||||||
|
return EMPTYSTRING.join(slist)
|
||||||
|
|
||||||
|
def getquote(self):
|
||||||
|
"""Get a quote-delimited fragment from self's field."""
|
||||||
|
return self.getdelimited('"', '"\r', False)
|
||||||
|
|
||||||
|
def getcomment(self):
    """Get a parenthesis-delimited fragment from self's field."""
    # allowcomments=True so RFC 2822 nested comments are consumed too.
    return self.getdelimited('(', ')\r', True)
|
||||||
|
|
||||||
|
def getdomainliteral(self):
    """Parse an RFC 2822 domain-literal."""
    # getdelimited strips the delimiters, so re-wrap the contents in
    # the literal's brackets.
    return '[%s]' % self.getdelimited('[', ']\r', False)
|
||||||
|
|
||||||
|
def getatom(self, atomends=None):
    """Parse an RFC 2822 atom.

    Optional atomends specifies a different set of end token delimiters
    (the default is to use self.atomends).  This is used e.g. in
    getphraselist() since phrase endings must not include the `.' (which
    is legal in phrases)."""
    if atomends is None:
        atomends = self.atomends

    # Scan forward to the first delimiter; the atom is everything
    # between the starting position and that delimiter.
    start = self.pos
    while self.pos < len(self.field):
        if self.field[self.pos] in atomends:
            break
        self.pos += 1

    return EMPTYSTRING.join(['', self.field[start:self.pos]])
|
||||||
|
|
||||||
|
def getphraselist(self):
    """Parse a sequence of RFC 2822 phrases.

    A phrase is a sequence of words, which are in turn either RFC 2822
    atoms or quoted-strings.  Phrases are canonicalized by squeezing all
    runs of continuous whitespace into one space.
    """
    words = []
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in self.FWS:
            # Folding whitespace between words is skipped (squeezed).
            self.pos += 1
        elif ch == '"':
            words.append(self.getquote())
        elif ch == '(':
            # Comments are collected separately, not part of the phrase.
            self.commentlist.append(self.getcomment())
        elif ch in self.phraseends:
            break
        else:
            words.append(self.getatom(self.phraseends))

    return words
|
||||||
|
|
||||||
|
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""

    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        # An empty/None field yields an empty list rather than parsing.
        self.addresslist = self.getaddrlist() if field else []

    def __len__(self):
        return len(self.addresslist)

    def __add__(self, other):
        # Set union: start from a copy of self, then add addresses from
        # other that self does not already contain.
        union = AddressList(None)
        union.addresslist = self.addresslist[:]
        union.addresslist.extend(
            addr for addr in other.addresslist
            if addr not in self.addresslist)
        return union

    def __iadd__(self, other):
        # Set union, in-place
        for addr in other.addresslist:
            if addr not in self.addresslist:
                self.addresslist.append(addr)
        return self

    def __sub__(self, other):
        # Set difference: keep only addresses absent from other.
        diff = AddressList(None)
        diff.addresslist = [addr for addr in self.addresslist
                            if addr not in other.addresslist]
        return diff

    def __isub__(self, other):
        # Set difference, in-place
        for addr in other.addresslist:
            if addr in self.addresslist:
                self.addresslist.remove(addr)
        return self

    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
|
365
venv/Lib/site-packages/future/backports/email/_policybase.py
Normal file
365
venv/Lib/site-packages/future/backports/email/_policybase.py
Normal file
@ -0,0 +1,365 @@
|
|||||||
|
"""Policy framework for the email package.
|
||||||
|
|
||||||
|
Allows fine grained feature control of how the package parses and emits data.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import super
|
||||||
|
from future.builtins import str
|
||||||
|
from future.utils import with_metaclass
|
||||||
|
|
||||||
|
import abc
|
||||||
|
from future.backports.email import header
|
||||||
|
from future.backports.email import charset as _charset
|
||||||
|
from future.backports.email.utils import _has_surrogates
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Policy',
|
||||||
|
'Compat32',
|
||||||
|
'compat32',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class _PolicyBase(object):
|
||||||
|
|
||||||
|
"""Policy Object basic framework.
|
||||||
|
|
||||||
|
This class is useless unless subclassed. A subclass should define
|
||||||
|
class attributes with defaults for any values that are to be
|
||||||
|
managed by the Policy object. The constructor will then allow
|
||||||
|
non-default values to be set for these attributes at instance
|
||||||
|
creation time. The instance will be callable, taking these same
|
||||||
|
attributes keyword arguments, and returning a new instance
|
||||||
|
identical to the called instance except for those values changed
|
||||||
|
by the keyword arguments. Instances may be added, yielding new
|
||||||
|
instances with any non-default values from the right hand
|
||||||
|
operand overriding those in the left hand operand. That is,
|
||||||
|
|
||||||
|
A + B == A(<non-default values of B>)
|
||||||
|
|
||||||
|
The repr of an instance can be used to reconstruct the object
|
||||||
|
if and only if the repr of the values can be used to reconstruct
|
||||||
|
those values.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, **kw):
|
||||||
|
"""Create new Policy, possibly overriding some defaults.
|
||||||
|
|
||||||
|
See class docstring for a list of overridable attributes.
|
||||||
|
|
||||||
|
"""
|
||||||
|
for name, value in kw.items():
|
||||||
|
if hasattr(self, name):
|
||||||
|
super(_PolicyBase,self).__setattr__(name, value)
|
||||||
|
else:
|
||||||
|
raise TypeError(
|
||||||
|
"{!r} is an invalid keyword argument for {}".format(
|
||||||
|
name, self.__class__.__name__))
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
args = [ "{}={!r}".format(name, value)
|
||||||
|
for name, value in self.__dict__.items() ]
|
||||||
|
return "{}({})".format(self.__class__.__name__, ', '.join(args))
|
||||||
|
|
||||||
|
def clone(self, **kw):
|
||||||
|
"""Return a new instance with specified attributes changed.
|
||||||
|
|
||||||
|
The new instance has the same attribute values as the current object,
|
||||||
|
except for the changes passed in as keyword arguments.
|
||||||
|
|
||||||
|
"""
|
||||||
|
newpolicy = self.__class__.__new__(self.__class__)
|
||||||
|
for attr, value in self.__dict__.items():
|
||||||
|
object.__setattr__(newpolicy, attr, value)
|
||||||
|
for attr, value in kw.items():
|
||||||
|
if not hasattr(self, attr):
|
||||||
|
raise TypeError(
|
||||||
|
"{!r} is an invalid keyword argument for {}".format(
|
||||||
|
attr, self.__class__.__name__))
|
||||||
|
object.__setattr__(newpolicy, attr, value)
|
||||||
|
return newpolicy
|
||||||
|
|
||||||
|
def __setattr__(self, name, value):
|
||||||
|
if hasattr(self, name):
|
||||||
|
msg = "{!r} object attribute {!r} is read-only"
|
||||||
|
else:
|
||||||
|
msg = "{!r} object has no attribute {!r}"
|
||||||
|
raise AttributeError(msg.format(self.__class__.__name__, name))
|
||||||
|
|
||||||
|
def __add__(self, other):
|
||||||
|
"""Non-default values from right operand override those from left.
|
||||||
|
|
||||||
|
The object returned is a new instance of the subclass.
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self.clone(**other.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
def _append_doc(doc, added_doc):
|
||||||
|
doc = doc.rsplit('\n', 1)[0]
|
||||||
|
added_doc = added_doc.split('\n', 1)[1]
|
||||||
|
return doc + '\n' + added_doc
|
||||||
|
|
||||||
|
def _extend_docstrings(cls):
    # Class decorator: any docstring starting with '+' (on the class or on
    # one of its attributes) is extended with the corresponding inherited
    # docstring via _append_doc.
    if cls.__doc__ and cls.__doc__.startswith('+'):
        cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
    for name, attr in cls.__dict__.items():
        if not (attr.__doc__ and attr.__doc__.startswith('+')):
            continue
        # Search the bases' MROs for the first ancestor version of this
        # attribute that carries a docstring.
        for ancestor in (a for base in cls.__bases__ for a in base.mro()):
            parent_doc = getattr(getattr(ancestor, name), '__doc__')
            if parent_doc:
                attr.__doc__ = _append_doc(parent_doc, attr.__doc__)
                break
    return cls
|
||||||
|
|
||||||
|
|
||||||
|
class Policy(with_metaclass(abc.ABCMeta, _PolicyBase)):

    r"""Controls for how messages are interpreted and formatted.

    Most of the classes and many of the methods in the email package accept
    Policy objects as parameters.  A Policy object contains a set of values and
    functions that control how input is interpreted and how output is rendered.
    For example, the parameter 'raise_on_defect' controls whether or not an RFC
    violation results in an error being raised or not, while 'max_line_length'
    controls the maximum length of output lines when a Message is serialized.

    Any valid attribute may be overridden when a Policy is created by passing
    it as a keyword argument to the constructor.  Policy objects are immutable,
    but a new Policy object can be created with only certain values changed by
    calling the Policy instance with keyword arguments.  Policy objects can
    also be added, producing a new Policy object in which the non-default
    attributes set in the right hand operand overwrite those specified in the
    left operand.

    Settable attributes:

    raise_on_defect     -- If true, then defects should be raised as errors.
                           Default: False.

    linesep             -- string containing the value to use as separation
                           between output lines.  Default '\n'.

    cte_type            -- Type of allowed content transfer encodings

                           7bit  -- ASCII only
                           8bit  -- Content-Transfer-Encoding: 8bit is allowed

                           Default: 8bit.  Also controls the disposition of
                           (RFC invalid) binary data in headers; see the
                           documentation of the binary_fold method.

    max_line_length     -- maximum length of lines, excluding 'linesep',
                           during serialization.  None or 0 means no line
                           wrapping is done.  Default is 78.

    """

    # Defaults for the settable attributes documented above.  Instance
    # overrides are applied by _PolicyBase.__init__/clone.
    raise_on_defect = False
    linesep = '\n'
    cte_type = '8bit'
    max_line_length = 78

    def handle_defect(self, obj, defect):
        """Based on policy, either raise defect or call register_defect.

        handle_defect(obj, defect)

        defect should be a Defect subclass, but in any case must be an
        Exception subclass.  obj is the object on which the defect should be
        registered if it is not raised.  If the raise_on_defect is True, the
        defect is raised as an error, otherwise the object and the defect are
        passed to register_defect.

        This method is intended to be called by parsers that discover defects.
        The email package parsers always call it with Defect instances.

        """
        if self.raise_on_defect:
            raise defect
        self.register_defect(obj, defect)

    def register_defect(self, obj, defect):
        """Record 'defect' on 'obj'.

        Called by handle_defect if raise_on_defect is False.  This method is
        part of the Policy API so that Policy subclasses can implement custom
        defect handling.  The default implementation calls the append method of
        the defects attribute of obj.  The objects used by the email package by
        default that get passed to this method will always have a defects
        attribute with an append method.

        """
        obj.defects.append(defect)

    def header_max_count(self, name):
        """Return the maximum allowed number of headers named 'name'.

        Called when a header is added to a Message object.  If the returned
        value is not 0 or None, and there are already a number of headers with
        the name 'name' equal to the value returned, a ValueError is raised.

        Because the default behavior of Message's __setitem__ is to append the
        value to the list of headers, it is easy to create duplicate headers
        without realizing it.  This method allows certain headers to be limited
        in the number of instances of that header that may be added to a
        Message programmatically.  (The limit is not observed by the parser,
        which will faithfully produce as many headers as exist in the message
        being parsed.)

        The default implementation returns None for all header names.
        """
        return None

    # The following five methods define the abstract parsing/serialization
    # API that concrete policies (e.g. Compat32) must implement.

    @abc.abstractmethod
    def header_source_parse(self, sourcelines):
        """Given a list of linesep terminated strings constituting the lines of
        a single header, return the (name, value) tuple that should be stored
        in the model.  The input lines should retain their terminating linesep
        characters.  The lines passed in by the email package may contain
        surrogateescaped binary data.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def header_store_parse(self, name, value):
        """Given the header name and the value provided by the application
        program, return the (name, value) that should be stored in the model.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def header_fetch_parse(self, name, value):
        """Given the header name and the value from the model, return the value
        to be returned to the application program that is requesting that
        header.  The value passed in by the email package may contain
        surrogateescaped binary data if the lines were parsed by a BytesParser.
        The returned value should not contain any surrogateescaped data.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def fold(self, name, value):
        """Given the header name and the value from the model, return a string
        containing linesep characters that implement the folding of the header
        according to the policy controls.  The value passed in by the email
        package may contain surrogateescaped binary data if the lines were
        parsed by a BytesParser.  The returned value should not contain any
        surrogateescaped data.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def fold_binary(self, name, value):
        """Given the header name and the value from the model, return binary
        data containing linesep characters that implement the folding of the
        header according to the policy controls.  The value passed in by the
        email package may contain surrogateescaped binary data.

        """
        raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
|
@_extend_docstrings
class Compat32(Policy):

    """+
    This particular policy is the backward compatibility Policy.  It
    replicates the behavior of the email package version 5.1.
    """

    # NOTE: docstrings below start with '+' on purpose; _extend_docstrings
    # splices the inherited Policy docstring in front of them at class
    # creation time.

    def _sanitize_header(self, name, value):
        # If the header value contains surrogates, return a Header using
        # the unknown-8bit charset to encode the bytes as encoded words.
        if not isinstance(value, str):
            # Assume it is already a header object
            return value
        if _has_surrogates(value):
            return header.Header(value, charset=_charset.UNKNOWN8BIT,
                                 header_name=name)
        else:
            return value

    def header_source_parse(self, sourcelines):
        """+
        The name is parsed as everything up to the ':' and returned unmodified.
        The value is determined by stripping leading whitespace off the
        remainder of the first line, joining all subsequent lines together, and
        stripping any trailing carriage return or linefeed characters.

        """
        name, value = sourcelines[0].split(':', 1)
        value = value.lstrip(' \t') + ''.join(sourcelines[1:])
        return (name, value.rstrip('\r\n'))

    def header_store_parse(self, name, value):
        """+
        The name and value are returned unmodified.
        """
        return (name, value)

    def header_fetch_parse(self, name, value):
        """+
        If the value contains binary data, it is converted into a Header object
        using the unknown-8bit charset.  Otherwise it is returned unmodified.
        """
        return self._sanitize_header(name, value)

    def fold(self, name, value):
        """+
        Headers are folded using the Header folding algorithm, which preserves
        existing line breaks in the value, and wraps each resulting line to the
        max_line_length.  Non-ASCII binary data are CTE encoded using the
        unknown-8bit charset.

        """
        return self._fold(name, value, sanitize=True)

    def fold_binary(self, name, value):
        """+
        Headers are folded using the Header folding algorithm, which preserves
        existing line breaks in the value, and wraps each resulting line to the
        max_line_length.  If cte_type is 7bit, non-ascii binary data is CTE
        encoded using the unknown-8bit charset.  Otherwise the original source
        header is used, with its existing line breaks and/or binary data.

        """
        folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
        return folded.encode('ascii', 'surrogateescape')

    def _fold(self, name, value, sanitize):
        # Build "Name: folded-value<linesep>".  'h' ends up as a Header
        # object to fold, or None when the raw value was emitted as-is.
        parts = []
        parts.append('%s: ' % name)
        if isinstance(value, str):
            if _has_surrogates(value):
                if sanitize:
                    h = header.Header(value,
                                      charset=_charset.UNKNOWN8BIT,
                                      header_name=name)
                else:
                    # If we have raw 8bit data in a byte string, we have no idea
                    # what the encoding is.  There is no safe way to split this
                    # string.  If it's ascii-subset, then we could do a normal
                    # ascii split, but if it's multibyte then we could break the
                    # string.  There's no way to know so the least harm seems to
                    # be to not split the string and risk it being too long.
                    parts.append(value)
                    h = None
            else:
                h = header.Header(value, header_name=name)
        else:
            # Assume it is a Header-like object.
            h = value
        if h is not None:
            parts.append(h.encode(linesep=self.linesep,
                                  maxlinelen=self.max_line_length))
        parts.append(self.linesep)
        return ''.join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
# The default policy: full backward compatibility with email 5.1.
compat32 = Compat32()
|
120
venv/Lib/site-packages/future/backports/email/base64mime.py
Normal file
120
venv/Lib/site-packages/future/backports/email/base64mime.py
Normal file
@ -0,0 +1,120 @@
|
|||||||
|
# Copyright (C) 2002-2007 Python Software Foundation
|
||||||
|
# Author: Ben Gertzfield
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Base64 content transfer encoding per RFCs 2045-2047.
|
||||||
|
|
||||||
|
This module handles the content transfer encoding method defined in RFC 2045
|
||||||
|
to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
|
||||||
|
characters encoding known as Base64.
|
||||||
|
|
||||||
|
It is used in the MIME standards for email to attach images, audio, and text
|
||||||
|
using some 8-bit character sets to messages.
|
||||||
|
|
||||||
|
This module provides an interface to encode and decode both headers and bodies
|
||||||
|
with Base64 encoding.
|
||||||
|
|
||||||
|
RFC 2045 defines a method for including character set information in an
|
||||||
|
`encoded-word' in a header. This method is commonly used for 8-bit real names
|
||||||
|
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
|
||||||
|
|
||||||
|
This module does not do the line wrapping or end-of-line character conversion
|
||||||
|
necessary for proper internationalized headers; it only does dumb encoding and
|
||||||
|
decoding. To deal with the various line wrapping issues, use the email.header
|
||||||
|
module.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import range
|
||||||
|
from future.builtins import bytes
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'body_decode',
|
||||||
|
'body_encode',
|
||||||
|
'decode',
|
||||||
|
'decodestring',
|
||||||
|
'header_encode',
|
||||||
|
'header_length',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
from base64 import b64encode
|
||||||
|
from binascii import b2a_base64, a2b_base64
|
||||||
|
|
||||||
|
# Line-ending and joining constants used throughout this module.
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''

# See also Charset.py
# Overhead of the RFC 2047 encoded-word chrome: "=?", "?b?", and "?=".
MISC_LEN = 7
|
||||||
|
|
||||||
|
|
||||||
|
# Helpers
|
||||||
|
def header_length(bytearray):
    """Return the length of s when it is encoded with base64."""
    # Base64 emits 4 output characters per 3 input bytes, with any final
    # partial group padded up to a full 4-character group.
    # (The parameter name shadows the builtin, but it is part of the
    # public signature and is kept for compatibility.)
    return 4 * ((len(bytearray) + 2) // 3)
|
||||||
|
|
||||||
|
|
||||||
|
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with Base64 encoding in a given charset.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.  Base64 encoding is defined in RFC 2045.
    """
    if not header_bytes:
        return ""
    if isinstance(header_bytes, str):
        # Text input: convert to raw bytes in the target charset first.
        header_bytes = header_bytes.encode(charset)
    encoded_text = b64encode(header_bytes).decode("ascii")
    return '=?%s?b?%s?=' % (charset, encoded_text)
|
||||||
|
|
||||||
|
|
||||||
|
def body_encode(s, maxlinelen=76, eol=NL):
    r"""Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s

    # 3 raw bytes become 4 encoded characters, so this many input bytes
    # fill one maxlinelen-character output line.
    chunk_size = maxlinelen * 3 // 4
    lines = []
    for start in range(0, len(s), chunk_size):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        encoded = b2a_base64(s[start:start + chunk_size]).decode("ascii")
        if encoded.endswith(NL) and eol != NL:
            encoded = encoded[:-1] + eol
        lines.append(encoded)
    return EMPTYSTRING.join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def decode(string):
    """Decode a raw base64 string, returning a bytes object.

    This function does not parse a full MIME header value encoded with
    base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
    level email.header class for that functionality.
    """
    if not string:
        return bytes()
    if isinstance(string, str):
        # Textual input: recover the underlying raw bytes losslessly
        # before base64-decoding.
        return a2b_base64(string.encode('raw-unicode-escape'))
    return a2b_base64(string)
|
||||||
|
|
||||||
|
|
||||||
|
# For convenience and backwards compatibility w/ standard base64 module
# (both names are plain aliases of decode above).
body_decode = decode
decodestring = decode
|
409
venv/Lib/site-packages/future/backports/email/charset.py
Normal file
409
venv/Lib/site-packages/future/backports/email/charset.py
Normal file
@ -0,0 +1,409 @@
|
|||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import str
|
||||||
|
from future.builtins import next
|
||||||
|
|
||||||
|
# Copyright (C) 2001-2007 Python Software Foundation
|
||||||
|
# Author: Ben Gertzfield, Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Charset',
|
||||||
|
'add_alias',
|
||||||
|
'add_charset',
|
||||||
|
'add_codec',
|
||||||
|
]
|
||||||
|
|
||||||
|
from functools import partial
|
||||||
|
|
||||||
|
from future.backports import email
|
||||||
|
from future.backports.email import errors
|
||||||
|
from future.backports.email.encoders import encode_7or8bit
|
||||||
|
|
||||||
|
|
||||||
|
# Flags for types of header encodings
QP = 1          # Quoted-Printable
BASE64 = 2      # Base64
SHORTEST = 3    # the shorter of QP and base64, but only for headers

# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
RFC2047_CHROME_LEN = 7

DEFAULT_CHARSET = 'us-ascii'
UNKNOWN8BIT = 'unknown-8bit'
EMPTYSTRING = ''


# Defaults
# Maps canonical charset name -> (header encoding, body encoding,
# output conversion charset); None means "no encoding"/"no conversion".
CHARSETS = {
    # input         header enc  body enc output conv
    'iso-8859-1':   (QP,        QP,      None),
    'iso-8859-2':   (QP,        QP,      None),
    'iso-8859-3':   (QP,        QP,      None),
    'iso-8859-4':   (QP,        QP,      None),
    # iso-8859-5 is Cyrillic, and not especially used
    # iso-8859-6 is Arabic, also not particularly used
    # iso-8859-7 is Greek, QP will not make it readable
    # iso-8859-8 is Hebrew, QP will not make it readable
    'iso-8859-9':   (QP,        QP,      None),
    'iso-8859-10':  (QP,        QP,      None),
    # iso-8859-11 is Thai, QP will not make it readable
    'iso-8859-13':  (QP,        QP,      None),
    'iso-8859-14':  (QP,        QP,      None),
    'iso-8859-15':  (QP,        QP,      None),
    'iso-8859-16':  (QP,        QP,      None),
    'windows-1252': (QP,        QP,      None),
    'viscii':       (QP,        QP,      None),
    'us-ascii':     (None,      None,    None),
    'big5':         (BASE64,    BASE64,  None),
    'gb2312':       (BASE64,    BASE64,  None),
    'euc-jp':       (BASE64,    None,    'iso-2022-jp'),
    'shift_jis':    (BASE64,    None,    'iso-2022-jp'),
    'iso-2022-jp':  (BASE64,    None,    None),
    'koi8-r':       (BASE64,    BASE64,  None),
    'utf-8':        (SHORTEST,  BASE64,  'utf-8'),
    }

# Aliases for other commonly-used names for character sets.  Map
# them to the real ones used in email.
ALIASES = {
    'latin_1':  'iso-8859-1',
    'latin-1':  'iso-8859-1',
    'latin_2':  'iso-8859-2',
    'latin-2':  'iso-8859-2',
    'latin_3':  'iso-8859-3',
    'latin-3':  'iso-8859-3',
    'latin_4':  'iso-8859-4',
    'latin-4':  'iso-8859-4',
    'latin_5':  'iso-8859-9',
    'latin-5':  'iso-8859-9',
    'latin_6':  'iso-8859-10',
    'latin-6':  'iso-8859-10',
    'latin_7':  'iso-8859-13',
    'latin-7':  'iso-8859-13',
    'latin_8':  'iso-8859-14',
    'latin-8':  'iso-8859-14',
    'latin_9':  'iso-8859-15',
    'latin-9':  'iso-8859-15',
    'latin_10': 'iso-8859-16',
    'latin-10': 'iso-8859-16',
    'cp949':    'ks_c_5601-1987',
    'euc_jp':   'euc-jp',
    'euc_kr':   'euc-kr',
    'ascii':    'us-ascii',
    }


# Map charsets to their Unicode codec strings.
CODEC_MAP = {
    'gb2312':   'eucgb2312_cn',
    'big5':     'big5_tw',
    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
    # Let that stuff pass through without conversion to/from Unicode.
    'us-ascii': None,
    }
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience functions for extending the above mappings
|
||||||
|
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Add character set properties to the global registry.

    charset is the input character set, and must be the canonical name of a
    character set.

    Optional header_enc and body_enc is either Charset.QP for
    quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST
    for the shortest of qp or base64 encoding, or None for no encoding.
    SHORTEST is only valid for header_enc.  These describe how message
    headers and message bodies in the input charset are to be encoded.
    Default is no encoding.

    Optional output_charset is the character set that the output should be
    in.  Conversions will proceed from input charset, to Unicode, to the
    output charset when the method Charset.convert() is called.  The default
    is to output in the same character set as the input.

    Both input_charset and output_charset must have Unicode codec entries in
    the module's charset-to-codec mapping; use add_codec(charset, codecname)
    to add codecs the module does not know about.  See the codecs module's
    documentation for more information.
    """
    # SHORTEST only makes sense when choosing between header encodings.
    if body_enc == SHORTEST:
        raise ValueError('SHORTEST not allowed for body_enc')
    CHARSETS[charset] = (header_enc, body_enc, output_charset)
|
||||||
|
|
||||||
|
|
||||||
|
def add_alias(alias, canonical):
    """Add a character set alias.

    alias is the alias name, e.g. latin-1
    canonical is the character set's canonical name, e.g. iso-8859-1
    """
    # Later lookups of `alias` will resolve to the canonical name.
    ALIASES.update({alias: canonical})
|
||||||
|
|
||||||
|
|
||||||
|
def add_codec(charset, codecname):
    """Add a codec that map characters in the given charset to/from Unicode.

    charset is the canonical name of a character set.  codecname is the name
    of a Python codec, as appropriate for the second argument to the unicode()
    built-in, or to the encode() method of a Unicode string.
    """
    # Register the codec used to convert this charset to/from Unicode.
    CODEC_MAP.update({charset: codecname})
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience function for encoding strings, taking into account
|
||||||
|
# that they might be unknown-8bit (ie: have surrogate-escaped bytes)
|
||||||
|
def _encode(string, codec):
    # Encode `string` to bytes, treating the pseudo-codec UNKNOWN8BIT as
    # "round-trip surrogate-escaped raw bytes unchanged".
    text = str(string)
    if codec == UNKNOWN8BIT:
        return text.encode('ascii', 'surrogateescape')
    return text.encode(codec)
|
||||||
|
|
||||||
|
|
||||||
|
class Charset(object):
    """Map character sets to their email properties.

    This class provides information about the requirements imposed on email
    for a specific character set.  It also provides convenience routines for
    converting between character sets, given the availability of the
    applicable codecs.  Given a character set, it will do its best to provide
    information on how to use that character set in an email in an
    RFC-compliant way.

    Certain character sets must be encoded with quoted-printable or base64
    when used in email headers or bodies.  Certain character sets must be
    converted outright, and are not allowed in email.  Instances of this
    module expose the following information about a character set:

    input_charset: The initial character set specified.  Common aliases
                   are converted to their `official' email names (e.g. latin_1
                   is converted to iso-8859-1).  Defaults to 7-bit us-ascii.

    header_encoding: If the character set must be encoded before it can be
                     used in an email header, this attribute will be set to
                     Charset.QP (for quoted-printable), Charset.BASE64 (for
                     base64 encoding), or Charset.SHORTEST for the shortest of
                     QP or BASE64 encoding.  Otherwise, it will be None.

    body_encoding: Same as header_encoding, but describes the encoding for the
                   mail message's body, which indeed may be different than the
                   header encoding.  Charset.SHORTEST is not allowed for
                   body_encoding.

    output_charset: Some character sets must be converted before they can be
                    used in email headers or bodies.  If the input_charset is
                    one of them, this attribute will contain the name of the
                    charset output will be converted to.  Otherwise, it will
                    be None.

    input_codec: The name of the Python codec used to convert the
                 input_charset to Unicode.  If no conversion codec is
                 necessary, this attribute will be None.

    output_codec: The name of the Python codec used to convert Unicode
                  to the output_charset.  If no conversion codec is necessary,
                  this attribute will have the same value as the input_codec.
    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
        # unicode because its .lower() is locale insensitive.  If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                # Probe only -- result discarded; a non-ASCII name raises.
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary.  Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs.  If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)

    def __str__(self):
        # input_charset is already lowercased in __init__; the extra
        # .lower() here is a harmless belt-and-braces.
        return self.input_charset.lower()

    __repr__ = __str__

    def __eq__(self, other):
        # Compares case-insensitively against anything str()-able.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 -- confirm Charset is never used as a
        # dict key or set member.
        return str(self) == str(other).lower()

    def __ne__(self, other):
        return not self.__eq__(other)

    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending on
        the encoding used, or it is a function in which case you should call
        the function with a single argument, the Message object being
        encoded.  The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns conversion function otherwise.
        """
        assert self.body_encoding != SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            # Caller invokes this function with the Message object; it sets
            # the Content-Transfer-Encoding header itself.
            return encode_7or8bit

    def get_output_charset(self):
        """Return the output character set.

        This is self.output_charset if that is not None, otherwise it is
        self.input_charset.
        """
        return self.output_charset or self.input_charset

    def header_encode(self, string):
        """Header-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        this charset's `header_encoding`.

        :param string: A unicode string for the header.  It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :return: The encoded string, with RFC 2047 chrome.
        """
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        encoder_module = self._get_encoder(header_bytes)
        if encoder_module is None:
            return string
        return encoder_module.header_encode(header_bytes, codec)

    def header_encode_lines(self, string, maxlengths):
        """Header-encode a string by converting it first to bytes.

        This is similar to `header_encode()` except that the string is fit
        into maximum line lengths as given by the argument.

        :param string: A unicode string for the header.  It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :param maxlengths: Maximum line length iterator.  Each element
            returned from this iterator will provide the next maximum line
            length.  This parameter is used as an argument to built-in next()
            and should never be exhausted.  The maximum line lengths should
            not count the RFC 2047 chrome.  These line lengths are only a
            hint; the splitter does the best it can.
        :return: Lines of encoded strings, each with RFC 2047 chrome.
        """
        # See which encoding we should use.
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        encoder_module = self._get_encoder(header_bytes)
        # NOTE(review): if header_encoding is None, encoder_module is None
        # and the next line raises AttributeError -- presumably callers
        # only reach this for charsets that require encoding; confirm.
        encoder = partial(encoder_module.header_encode, charset=codec)
        # Calculate the number of characters that the RFC 2047 chrome will
        # contribute to each line.
        charset = self.get_output_charset()
        extra = len(charset) + RFC2047_CHROME_LEN
        # Now comes the hard part.  We must encode bytes but we can't split on
        # bytes because some character sets are variable length and each
        # encoded word must stand on its own.  So the problem is you have to
        # encode to bytes to figure out this word's length, but you must split
        # on characters.  This causes two problems: first, we don't know how
        # many octets a specific substring of unicode characters will get
        # encoded to, and second, we don't know how many ASCII characters
        # those octets will get encoded to.  Unless we try it.  Which seems
        # inefficient.  In the interest of being correct rather than fast (and
        # in the hope that there will be few encoded headers in any such
        # message), brute force it. :(
        lines = []
        current_line = []
        maxlen = next(maxlengths) - extra
        for character in string:
            current_line.append(character)
            this_line = EMPTYSTRING.join(current_line)
            length = encoder_module.header_length(_encode(this_line, charset))
            if length > maxlen:
                # This last character doesn't fit so pop it off.
                current_line.pop()
                # Does nothing fit on the first line?
                if not lines and not current_line:
                    lines.append(None)
                else:
                    # NOTE(review): separator is computed but never used --
                    # dead assignment preserved for fidelity.
                    separator = (' ' if lines else '')
                    joined_line = EMPTYSTRING.join(current_line)
                    header_bytes = _encode(joined_line, codec)
                    lines.append(encoder(header_bytes))
                # The overflowing character starts the next line.
                current_line = [character]
                maxlen = next(maxlengths) - extra
        # Flush whatever is left (possibly the empty string).
        joined_line = EMPTYSTRING.join(current_line)
        header_bytes = _encode(joined_line, codec)
        lines.append(encoder(header_bytes))
        return lines

    def _get_encoder(self, header_bytes):
        # Return the encoder module implied by self.header_encoding:
        # base64mime, quoprimime, whichever yields the shorter encoding
        # for SHORTEST, or None when no encoding is required.
        if self.header_encoding == BASE64:
            return email.base64mime
        elif self.header_encoding == QP:
            return email.quoprimime
        elif self.header_encoding == SHORTEST:
            len64 = email.base64mime.header_length(header_bytes)
            lenqp = email.quoprimime.header_length(header_bytes)
            if len64 < lenqp:
                return email.base64mime
            else:
                return email.quoprimime
        else:
            return None

    def body_encode(self, string):
        """Body-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        self.body_encoding.  If body_encoding is None, we assume the
        output charset is a 7bit encoding, so re-encoding the decoded
        string using the ascii codec produces the correct string version
        of the content.
        """
        if not string:
            # Empty (or falsy) input is returned untouched.
            return string
        if self.body_encoding is BASE64:
            if isinstance(string, str):
                string = string.encode(self.output_charset)
            return email.base64mime.body_encode(string)
        elif self.body_encoding is QP:
            # quopromime.body_encode takes a string, but operates on it as if
            # it were a list of byte codes.  For a (minimal) history on why
            # this is so, see changeset 0cf700464177.  To correctly encode a
            # character set, then, we must turn it into pseudo bytes via the
            # latin1 charset, which will encode any byte as a single code point
            # between 0 and 255, which is what body_encode is expecting.
            if isinstance(string, str):
                string = string.encode(self.output_charset)
            string = string.decode('latin1')
            return email.quoprimime.body_encode(string)
        else:
            if isinstance(string, str):
                # Round-trip through ascii to verify the payload really is
                # 7bit-clean for the claimed output charset.
                string = string.encode(self.output_charset).decode('ascii')
            return string
|
90
venv/Lib/site-packages/future/backports/email/encoders.py
Normal file
90
venv/Lib/site-packages/future/backports/email/encoders.py
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Encodings and related functions."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import str
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'encode_7or8bit',
|
||||||
|
'encode_base64',
|
||||||
|
'encode_noop',
|
||||||
|
'encode_quopri',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from base64 import encodebytes as _bencode
|
||||||
|
except ImportError:
|
||||||
|
# Py2 compatibility. TODO: test this!
|
||||||
|
from base64 import encodestring as _bencode
|
||||||
|
from quopri import encodestring as _encodestring
|
||||||
|
|
||||||
|
|
||||||
|
def _qencode(s):
|
||||||
|
enc = _encodestring(s, quotetabs=True)
|
||||||
|
# Must encode spaces, which quopri.encodestring() doesn't do
|
||||||
|
return enc.replace(' ', '=20')
|
||||||
|
|
||||||
|
|
||||||
|
def encode_base64(msg):
|
||||||
|
"""Encode the message's payload in Base64.
|
||||||
|
|
||||||
|
Also, add an appropriate Content-Transfer-Encoding header.
|
||||||
|
"""
|
||||||
|
orig = msg.get_payload()
|
||||||
|
encdata = str(_bencode(orig), 'ascii')
|
||||||
|
msg.set_payload(encdata)
|
||||||
|
msg['Content-Transfer-Encoding'] = 'base64'
|
||||||
|
|
||||||
|
|
||||||
|
def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    msg.set_payload(_qencode(msg.get_payload()))
    msg['Content-Transfer-Encoding'] = 'quoted-printable'
|
||||||
|
|
||||||
|
|
||||||
|
def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit.

    ASCII-clean payloads are labelled 7bit (and a bytes payload is
    normalised to the internal surrogate-escaped str form); anything
    else is labelled 8bit, except iso-2022-* charsets which encode to a
    7-bit representation despite containing non-ASCII escape sequences.
    """
    payload = msg.get_payload()
    if payload is None:
        # There's no payload.  For backwards compatibility we use 7bit.
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # We play a trick to make this go fast.  If encoding/decoding to ASCII
    # succeeds, we know the data must be 7bit, otherwise treat it as 8bit.
    try:
        if isinstance(payload, str):
            payload.encode('ascii')
        else:
            payload.decode('ascii')
    except UnicodeError:
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* is non-ASCII but encodes to a 7-bit representation.
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            cte = '7bit'
        else:
            cte = '8bit'
        msg['Content-Transfer-Encoding'] = cte
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
        if not isinstance(payload, str):
            # Normalise bytes to the surrogate-escaped str model.
            msg.set_payload(payload.decode('ascii', 'surrogateescape'))
|
||||||
|
|
||||||
|
|
||||||
|
def encode_noop(msg):
    """Do nothing (almost).

    In Python 3 a bytes payload is converted to the internal
    surrogate-escaped str form so the message model stays consistent;
    a str payload is left untouched.
    """
    payload = msg.get_payload()
    if isinstance(payload, str):
        return
    msg.set_payload(payload.decode('ascii', 'surrogateescape'))
|
111
venv/Lib/site-packages/future/backports/email/errors.py
Normal file
111
venv/Lib/site-packages/future/backports/email/errors.py
Normal file
@ -0,0 +1,111 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""email package exception classes."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import super
|
||||||
|
|
||||||
|
|
||||||
|
class MessageError(Exception):
    """Base class for errors in the email package."""
    # Root of this package's exception hierarchy.
|
||||||
|
|
||||||
|
|
||||||
|
class MessageParseError(MessageError):
    """Base class for message parsing errors."""
|
||||||
|
|
||||||
|
|
||||||
|
class HeaderParseError(MessageParseError):
    """Error while parsing headers."""
|
||||||
|
|
||||||
|
|
||||||
|
class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""
|
||||||
|
|
||||||
|
|
||||||
|
class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""
    # Also a TypeError so callers catching TypeError keep working.
|
||||||
|
|
||||||
|
|
||||||
|
class CharsetError(MessageError):
    """An illegal charset was given."""
|
||||||
|
|
||||||
|
|
||||||
|
# These are parsing defects which the parser was able to work around.
|
||||||
|
class MessageDefect(ValueError):
    """Base class for a message defect.

    Defects are recorded on the message being parsed rather than raised;
    the offending line, if any, is kept on the `line` attribute.
    """

    def __init__(self, line=None):
        self.line = line
        if line is not None:
            super().__init__(line)
|
||||||
|
|
||||||
|
class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""
    # Recorded on the message's .defects list by the parser, not raised.
|
||||||
|
|
||||||
|
class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""
|
||||||
|
|
||||||
|
class CloseBoundaryNotFoundDefect(MessageDefect):
    """A start boundary was found, but not the corresponding close boundary."""
|
||||||
|
|
||||||
|
class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""
|
||||||
|
|
||||||
|
class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""
|
||||||
|
|
||||||
|
class MissingHeaderBodySeparatorDefect(MessageDefect):
    """Found line with no leading whitespace and no colon before blank line."""
|
||||||
|
# XXX: backward compatibility alias, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
|
||||||
|
|
||||||
|
class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""
|
||||||
|
|
||||||
|
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
    """An invalid content transfer encoding was set on the multipart itself."""
|
||||||
|
|
||||||
|
class UndecodableBytesDefect(MessageDefect):
    """Header contained bytes that could not be decoded."""
|
||||||
|
|
||||||
|
class InvalidBase64PaddingDefect(MessageDefect):
    """base64 encoded sequence had an incorrect length."""
|
||||||
|
|
||||||
|
class InvalidBase64CharactersDefect(MessageDefect):
    """base64 encoded sequence had characters not in base64 alphabet."""
|
||||||
|
|
||||||
|
# These errors are specific to header parsing.
|
||||||
|
|
||||||
|
class HeaderDefect(MessageDefect):
    """Base class for a header defect."""

    def __init__(self, *args, **kw):
        # Pure pass-through; the class exists only so header-parsing
        # defects share a common base distinct from body defects.
        super().__init__(*args, **kw)
|
||||||
|
|
||||||
|
class InvalidHeaderDefect(HeaderDefect):
    """Header is not valid, message gives details."""
|
||||||
|
|
||||||
|
class HeaderMissingRequiredValue(HeaderDefect):
    """A header that must have a value had none."""
|
||||||
|
|
||||||
|
class NonPrintableDefect(HeaderDefect):
    """ASCII characters outside the ascii-printable range found."""

    def __init__(self, non_printables):
        super().__init__(non_printables)
        # Keep the offending characters for callers that want to inspect them.
        self.non_printables = non_printables

    def __str__(self):
        return ("the following ASCII non-printables found in header: "
                "{}".format(self.non_printables))
|
||||||
|
|
||||||
|
class ObsoleteHeaderDefect(HeaderDefect):
    """Header uses syntax declared obsolete by RFC 5322."""
|
||||||
|
|
||||||
|
class NonASCIILocalPartDefect(HeaderDefect):
    """local_part contains non-ASCII characters."""
    # This defect only occurs during unicode parsing, not when
    # parsing messages decoded from binary.
|
525
venv/Lib/site-packages/future/backports/email/feedparser.py
Normal file
525
venv/Lib/site-packages/future/backports/email/feedparser.py
Normal file
@ -0,0 +1,525 @@
|
|||||||
|
# Copyright (C) 2004-2006 Python Software Foundation
|
||||||
|
# Authors: Baxter, Wouters and Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""FeedParser - An email feed parser.
|
||||||
|
|
||||||
|
The feed parser implements an interface for incrementally parsing an email
|
||||||
|
message, line by line. This has advantages for certain applications, such as
|
||||||
|
those reading email messages off a socket.
|
||||||
|
|
||||||
|
FeedParser.feed() is the primary interface for pushing new data into the
|
||||||
|
parser. It returns when there's nothing more it can do with the available
|
||||||
|
data. When you have no more data to push into the parser, call .close().
|
||||||
|
This completes the parsing and returns the root message object.
|
||||||
|
|
||||||
|
The other advantage of this parser is that it will never raise a parsing
|
||||||
|
exception. Instead, when it finds something unexpected, it adds a 'defect' to
|
||||||
|
the current message. Defects are just instances that live on the message
|
||||||
|
object's .defects attribute.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import object, range, super
|
||||||
|
from future.utils import implements_iterator, PY3
|
||||||
|
|
||||||
|
__all__ = ['FeedParser', 'BytesFeedParser']
|
||||||
|
|
||||||
|
import re
|
||||||
|
|
||||||
|
from future.backports.email import errors
|
||||||
|
from future.backports.email import message
|
||||||
|
from future.backports.email._policybase import compat32
|
||||||
|
|
||||||
|
# Line-ending recognizers.  All patterns are raw strings: the original
# non-raw '(\r\n|\r|\n)\Z' relied on the invalid string escape '\Z',
# which modern Python flags (SyntaxWarning, slated to become an error).
# The regexes themselves are unchanged.
NLCRE = re.compile(r'\r\n|\r|\n')
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields.  ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'

# Sentinel returned by BufferedSubFile.readline() (and yielded by the
# parser generator) when no complete line is buffered yet.
NeedMoreData = object()
|
||||||
|
|
||||||
|
|
||||||
|
# @implements_iterator
|
||||||
|
class BufferedSubFile(object):
    """A file-ish object that can have new data loaded into it.

    You can also push and pop line-matching predicates onto a stack.  When the
    current predicate matches the current line, a false EOF response
    (i.e. empty string) is returned instead.  This lets the parser adhere to a
    simple abstraction -- it parses until EOF closes the current message.
    """
    def __init__(self):
        # The last partial line pushed into this object.
        self._partial = ''
        # The list of full, pushed lines, in reverse order
        self._lines = []
        # The stack of false-EOF checking predicates.
        self._eofstack = []
        # A flag indicating whether the file has been closed or not.
        self._closed = False

    def push_eof_matcher(self, pred):
        # pred is called with each candidate line; a truthy result makes
        # readline() report a (false) EOF instead of returning the line.
        self._eofstack.append(pred)

    def pop_eof_matcher(self):
        return self._eofstack.pop()

    def close(self):
        # Don't forget any trailing partial line.
        self._lines.append(self._partial)
        self._partial = ''
        self._closed = True

    def readline(self):
        """Return the next line, '' at (possibly false) EOF, or NeedMoreData."""
        if not self._lines:
            if self._closed:
                return ''
            # No complete line is buffered; caller must push() more data.
            return NeedMoreData
        # Pop the line off the stack and see if it matches the current
        # false-EOF predicate.
        line = self._lines.pop()
        # RFC 2046, section 5.1.2 requires us to recognize outer level
        # boundaries at any level of inner nesting.  Do this, but be sure it's
        # in the order of most to least nested.
        for ateof in self._eofstack[::-1]:
            if ateof(line):
                # We're at the false EOF.  But push the last line back first.
                self._lines.append(line)
                return ''
        return line

    def unreadline(self, line):
        # Let the consumer push a line back into the buffer.
        assert line is not NeedMoreData
        self._lines.append(line)

    def push(self, data):
        """Push some new data into this object."""
        # Handle any previous leftovers
        data, self._partial = self._partial + data, ''
        # Crack into lines, but preserve the newlines on the end of each
        parts = NLCRE_crack.split(data)
        # The *ahem* interesting behaviour of re.split when supplied grouping
        # parentheses is that the last element of the resulting list is the
        # data after the final RE.  In the case of a NL/CR terminated string,
        # this is the empty string.
        self._partial = parts.pop()
        #GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending with \r:
        # is there a \n to follow later?  Hold the '\r'-terminated fragment
        # back as a partial line until we know.
        if not self._partial and parts and parts[-1].endswith('\r'):
            self._partial = parts.pop(-2)+parts.pop()
        # parts is a list of strings, alternating between the line contents
        # and the eol character(s).  Gather up a list of lines after
        # re-attaching the newlines.
        lines = []
        for i in range(len(parts) // 2):
            lines.append(parts[i*2] + parts[i*2+1])
        self.pushlines(lines)

    def pushlines(self, lines):
        # Reverse and insert at the front of the lines
        # (readline() pops from the end, so the front is the back of the queue).
        self._lines[:0] = lines[::-1]

    def __iter__(self):
        return self

    def __next__(self):
        # Iteration stops at (possibly false) EOF; NeedMoreData is yielded
        # through to the caller so it can feed more data.
        line = self.readline()
        if line == '':
            raise StopIteration
        return line
|
||||||
|
|
||||||
|
|
||||||
|
class FeedParser(object):
|
||||||
|
"""A feed-style parser of email."""
|
||||||
|
|
||||||
|
def __init__(self, _factory=message.Message, **_3to2kwargs):
|
||||||
|
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
|
||||||
|
else: policy = compat32
|
||||||
|
"""_factory is called with no arguments to create a new message obj
|
||||||
|
|
||||||
|
The policy keyword specifies a policy object that controls a number of
|
||||||
|
aspects of the parser's operation. The default policy maintains
|
||||||
|
backward compatibility.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self._factory = _factory
|
||||||
|
self.policy = policy
|
||||||
|
try:
|
||||||
|
_factory(policy=self.policy)
|
||||||
|
self._factory_kwds = lambda: {'policy': self.policy}
|
||||||
|
except TypeError:
|
||||||
|
# Assume this is an old-style factory
|
||||||
|
self._factory_kwds = lambda: {}
|
||||||
|
self._input = BufferedSubFile()
|
||||||
|
self._msgstack = []
|
||||||
|
if PY3:
|
||||||
|
self._parse = self._parsegen().__next__
|
||||||
|
else:
|
||||||
|
self._parse = self._parsegen().next
|
||||||
|
self._cur = None
|
||||||
|
self._last = None
|
||||||
|
self._headersonly = False
|
||||||
|
|
||||||
|
# Non-public interface for supporting Parser's headersonly flag
|
||||||
|
def _set_headersonly(self):
|
||||||
|
self._headersonly = True
|
||||||
|
|
||||||
|
def feed(self, data):
|
||||||
|
"""Push more data into the parser."""
|
||||||
|
self._input.push(data)
|
||||||
|
self._call_parse()
|
||||||
|
|
||||||
|
def _call_parse(self):
|
||||||
|
try:
|
||||||
|
self._parse()
|
||||||
|
except StopIteration:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
"""Parse all remaining data and return the root message object."""
|
||||||
|
self._input.close()
|
||||||
|
self._call_parse()
|
||||||
|
root = self._pop_message()
|
||||||
|
assert not self._msgstack
|
||||||
|
# Look for final set of defects
|
||||||
|
if root.get_content_maintype() == 'multipart' \
|
||||||
|
and not root.is_multipart():
|
||||||
|
defect = errors.MultipartInvariantViolationDefect()
|
||||||
|
self.policy.handle_defect(root, defect)
|
||||||
|
return root
|
||||||
|
|
||||||
|
def _new_message(self):
|
||||||
|
msg = self._factory(**self._factory_kwds())
|
||||||
|
if self._cur and self._cur.get_content_type() == 'multipart/digest':
|
||||||
|
msg.set_default_type('message/rfc822')
|
||||||
|
if self._msgstack:
|
||||||
|
self._msgstack[-1].attach(msg)
|
||||||
|
self._msgstack.append(msg)
|
||||||
|
self._cur = msg
|
||||||
|
self._last = msg
|
||||||
|
|
||||||
|
def _pop_message(self):
|
||||||
|
retval = self._msgstack.pop()
|
||||||
|
if self._msgstack:
|
||||||
|
self._cur = self._msgstack[-1]
|
||||||
|
else:
|
||||||
|
self._cur = None
|
||||||
|
return retval
|
||||||
|
|
||||||
|
def _parsegen(self):
    """Incremental parse driver (a generator).

    Yields NeedMoreData whenever the underlying BufferedSubFile runs dry,
    so feed()/_call_parse() can resume it later.  Builds the message tree
    on self._msgstack via _new_message/_pop_message, recursing into itself
    for nested message/* and multipart/* parts.
    """
    # Create a new message and start by parsing headers.
    self._new_message()
    headers = []
    # Collect the headers, searching for a line that doesn't match the RFC
    # 2822 header or continuation pattern (including an empty line).
    for line in self._input:
        if line is NeedMoreData:
            yield NeedMoreData
            continue
        if not headerRE.match(line):
            # If we saw the RFC defined header/body separator
            # (i.e. newline), just throw it away. Otherwise the line is
            # part of the body so push it back.
            if not NLCRE.match(line):
                defect = errors.MissingHeaderBodySeparatorDefect()
                self.policy.handle_defect(self._cur, defect)
                self._input.unreadline(line)
            break
        headers.append(line)
    # Done with the headers, so parse them and figure out what we're
    # supposed to see in the body of the message.
    self._parse_headers(headers)
    # Headers-only parsing is a backwards compatibility hack, which was
    # necessary in the older parser, which could raise errors.  All
    # remaining lines in the input are thrown into the message body.
    if self._headersonly:
        lines = []
        while True:
            line = self._input.readline()
            if line is NeedMoreData:
                yield NeedMoreData
                continue
            if line == '':
                break
            lines.append(line)
        self._cur.set_payload(EMPTYSTRING.join(lines))
        return
    if self._cur.get_content_type() == 'message/delivery-status':
        # message/delivery-status contains blocks of headers separated by
        # a blank line.  We'll represent each header block as a separate
        # nested message object, but the processing is a bit different
        # than standard message/* types because there is no body for the
        # nested messages.  A blank line separates the subparts.
        while True:
            self._input.push_eof_matcher(NLCRE.match)
            for retval in self._parsegen():
                if retval is NeedMoreData:
                    yield NeedMoreData
                    continue
                break
            msg = self._pop_message()
            # We need to pop the EOF matcher in order to tell if we're at
            # the end of the current file, not the end of the last block
            # of message headers.
            self._input.pop_eof_matcher()
            # The input stream must be sitting at the newline or at the
            # EOF.  We want to see if we're at the end of this subpart, so
            # first consume the blank line, then test the next line to see
            # if we're at this subpart's EOF.
            while True:
                line = self._input.readline()
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                break
            while True:
                line = self._input.readline()
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                break
            if line == '':
                break
            # Not at EOF so this is a line we're going to need.
            self._input.unreadline(line)
        return
    if self._cur.get_content_maintype() == 'message':
        # The message claims to be a message/* type, then what follows is
        # another RFC 2822 message.
        for retval in self._parsegen():
            if retval is NeedMoreData:
                yield NeedMoreData
                continue
            break
        self._pop_message()
        return
    if self._cur.get_content_maintype() == 'multipart':
        boundary = self._cur.get_boundary()
        if boundary is None:
            # The message /claims/ to be a multipart but it has not
            # defined a boundary.  That's a problem which we'll handle by
            # reading everything until the EOF and marking the message as
            # defective.
            defect = errors.NoBoundaryInMultipartDefect()
            self.policy.handle_defect(self._cur, defect)
            lines = []
            for line in self._input:
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
                lines.append(line)
            self._cur.set_payload(EMPTYSTRING.join(lines))
            return
        # Make sure a valid content type was specified per RFC 2045:6.4.
        if (self._cur.get('content-transfer-encoding', '8bit').lower()
                not in ('7bit', '8bit', 'binary')):
            defect = errors.InvalidMultipartContentTransferEncodingDefect()
            self.policy.handle_defect(self._cur, defect)
        # Create a line match predicate which matches the inter-part
        # boundary as well as the end-of-multipart boundary.  Don't push
        # this onto the input stream until we've scanned past the
        # preamble.
        separator = '--' + boundary
        boundaryre = re.compile(
            '(?P<sep>' + re.escape(separator) +
            r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
        capturing_preamble = True
        preamble = []
        linesep = False
        close_boundary_seen = False
        while True:
            line = self._input.readline()
            if line is NeedMoreData:
                yield NeedMoreData
                continue
            if line == '':
                break
            mo = boundaryre.match(line)
            if mo:
                # If we're looking at the end boundary, we're done with
                # this multipart.  If there was a newline at the end of
                # the closing boundary, then we need to initialize the
                # epilogue with the empty string (see below).
                if mo.group('end'):
                    close_boundary_seen = True
                    linesep = mo.group('linesep')
                    break
                # We saw an inter-part boundary.  Were we in the preamble?
                if capturing_preamble:
                    if preamble:
                        # According to RFC 2046, the last newline belongs
                        # to the boundary.
                        lastline = preamble[-1]
                        eolmo = NLCRE_eol.search(lastline)
                        if eolmo:
                            preamble[-1] = lastline[:-len(eolmo.group(0))]
                        self._cur.preamble = EMPTYSTRING.join(preamble)
                    capturing_preamble = False
                    self._input.unreadline(line)
                    continue
                # We saw a boundary separating two parts.  Consume any
                # multiple boundary lines that may be following.  Our
                # interpretation of RFC 2046 BNF grammar does not produce
                # body parts within such double boundaries.
                while True:
                    line = self._input.readline()
                    if line is NeedMoreData:
                        yield NeedMoreData
                        continue
                    mo = boundaryre.match(line)
                    if not mo:
                        self._input.unreadline(line)
                        break
                # Recurse to parse this subpart; the input stream points
                # at the subpart's first line.
                self._input.push_eof_matcher(boundaryre.match)
                for retval in self._parsegen():
                    if retval is NeedMoreData:
                        yield NeedMoreData
                        continue
                    break
                # Because of RFC 2046, the newline preceding the boundary
                # separator actually belongs to the boundary, not the
                # previous subpart's payload (or epilogue if the previous
                # part is a multipart).
                if self._last.get_content_maintype() == 'multipart':
                    epilogue = self._last.epilogue
                    if epilogue == '':
                        self._last.epilogue = None
                    elif epilogue is not None:
                        mo = NLCRE_eol.search(epilogue)
                        if mo:
                            end = len(mo.group(0))
                            self._last.epilogue = epilogue[:-end]
                else:
                    payload = self._last._payload
                    if isinstance(payload, str):
                        mo = NLCRE_eol.search(payload)
                        if mo:
                            payload = payload[:-len(mo.group(0))]
                            self._last._payload = payload
                self._input.pop_eof_matcher()
                self._pop_message()
                # Set the multipart up for newline cleansing, which will
                # happen if we're in a nested multipart.
                self._last = self._cur
            else:
                # I think we must be in the preamble
                assert capturing_preamble
                preamble.append(line)
        # We've seen either the EOF or the end boundary.  If we're still
        # capturing the preamble, we never saw the start boundary.  Note
        # that as a defect and store the captured text as the payload.
        if capturing_preamble:
            defect = errors.StartBoundaryNotFoundDefect()
            self.policy.handle_defect(self._cur, defect)
            self._cur.set_payload(EMPTYSTRING.join(preamble))
            epilogue = []
            # NOTE(review): the lines consumed below are never appended to
            # `epilogue`, so it stays empty here; this matches upstream
            # behavior, but looks intentional-only-by-accident — confirm.
            for line in self._input:
                if line is NeedMoreData:
                    yield NeedMoreData
                    continue
            self._cur.epilogue = EMPTYSTRING.join(epilogue)
            return
        # If we're not processing the preamble, then we might have seen
        # EOF without seeing that end boundary...that is also a defect.
        if not close_boundary_seen:
            defect = errors.CloseBoundaryNotFoundDefect()
            self.policy.handle_defect(self._cur, defect)
            return
        # Everything from here to the EOF is epilogue.  If the end boundary
        # ended in a newline, we'll need to make sure the epilogue isn't
        # None
        if linesep:
            epilogue = ['']
        else:
            epilogue = []
        for line in self._input:
            if line is NeedMoreData:
                yield NeedMoreData
                continue
            epilogue.append(line)
        # Any CRLF at the front of the epilogue is not technically part of
        # the epilogue.  Also, watch out for an empty string epilogue,
        # which means a single newline.
        if epilogue:
            firstline = epilogue[0]
            bolmo = NLCRE_bol.match(firstline)
            if bolmo:
                epilogue[0] = firstline[len(bolmo.group(0)):]
        self._cur.epilogue = EMPTYSTRING.join(epilogue)
        return
    # Otherwise, it's some non-multipart type, so the entire rest of the
    # file contents becomes the payload.
    lines = []
    for line in self._input:
        if line is NeedMoreData:
            yield NeedMoreData
            continue
        lines.append(line)
    self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||||
|
|
||||||
|
def _parse_headers(self, lines):
    """Parse a list of raw header lines into self._cur via the policy.

    Handles continuation lines, the unix-from envelope line, and records
    defects for malformed input instead of raising.
    """
    # Passed a list of lines that make up the headers for the current msg
    lastheader = ''
    lastvalue = []
    for lineno, line in enumerate(lines):
        # Check for continuation
        if line[0] in ' \t':
            if not lastheader:
                # The first line of the headers was a continuation.  This
                # is illegal, so let's note the defect, store the illegal
                # line, and ignore it for purposes of headers.
                defect = errors.FirstHeaderLineIsContinuationDefect(line)
                self.policy.handle_defect(self._cur, defect)
                continue
            lastvalue.append(line)
            continue
        # A new header starts here; flush the one accumulated so far.
        if lastheader:
            self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
            lastheader, lastvalue = '', []
        # Check for envelope header, i.e. unix-from
        if line.startswith('From '):
            if lineno == 0:
                # Strip off the trailing newline
                mo = NLCRE_eol.search(line)
                if mo:
                    line = line[:-len(mo.group(0))]
                self._cur.set_unixfrom(line)
                continue
            elif lineno == len(lines) - 1:
                # Something looking like a unix-from at the end - it's
                # probably the first line of the body, so push back the
                # line and stop.
                self._input.unreadline(line)
                return
            else:
                # Weirdly placed unix-from line.  Note this as a defect
                # and ignore it.
                defect = errors.MisplacedEnvelopeHeaderDefect(line)
                self._cur.defects.append(defect)
                continue
        # Split the line on the colon separating field name from value.
        # There will always be a colon, because if there wasn't the part of
        # the parser that calls us would have started parsing the body.
        i = line.find(':')
        assert i>0, "_parse_headers fed line with no : and no leading WS"
        lastheader = line[:i]
        lastvalue = [line]
    # Done with all the lines, so handle the last header.
    if lastheader:
        self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
|
||||||
|
|
||||||
|
|
||||||
|
class BytesFeedParser(FeedParser):
    """A FeedParser variant whose feed() accepts bytes instead of str.

    Incoming bytes are decoded as ASCII with the surrogateescape handler,
    so arbitrary 8-bit data survives the round trip through the text-based
    parser machinery.
    """

    def feed(self, data):
        text = data.decode('ascii', 'surrogateescape')
        super().feed(text)
|
498
venv/Lib/site-packages/future/backports/email/generator.py
Normal file
498
venv/Lib/site-packages/future/backports/email/generator.py
Normal file
@ -0,0 +1,498 @@
|
|||||||
|
# Copyright (C) 2001-2010 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Classes to generate plain text from a message object tree."""
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import super
|
||||||
|
from future.builtins import str
|
||||||
|
|
||||||
|
__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
|
||||||
|
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import random
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from io import StringIO, BytesIO
|
||||||
|
from future.backports.email._policybase import compat32
|
||||||
|
from future.backports.email.header import Header
|
||||||
|
from future.backports.email.utils import _has_surrogates
|
||||||
|
import future.backports.email.charset as _charset
|
||||||
|
|
||||||
|
UNDERSCORE = '_'
|
||||||
|
NL = '\n' # XXX: no longer used by the code below.
|
||||||
|
|
||||||
|
fcre = re.compile(r'^From ', re.MULTILINE)
|
||||||
|
|
||||||
|
|
||||||
|
class Generator(object):
|
||||||
|
"""Generates output from a Message object tree.
|
||||||
|
|
||||||
|
This basic generator writes the message to the given file object as plain
|
||||||
|
text.
|
||||||
|
"""
|
||||||
|
#
|
||||||
|
# Public interface
|
||||||
|
#
|
||||||
|
|
||||||
|
def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, **_3to2kwargs):
|
||||||
|
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
|
||||||
|
else: policy = None
|
||||||
|
"""Create the generator for message flattening.
|
||||||
|
|
||||||
|
outfp is the output file-like object for writing the message to. It
|
||||||
|
must have a write() method.
|
||||||
|
|
||||||
|
Optional mangle_from_ is a flag that, when True (the default), escapes
|
||||||
|
From_ lines in the body of the message by putting a `>' in front of
|
||||||
|
them.
|
||||||
|
|
||||||
|
Optional maxheaderlen specifies the longest length for a non-continued
|
||||||
|
header. When a header line is longer (in characters, with tabs
|
||||||
|
expanded to 8 spaces) than maxheaderlen, the header will split as
|
||||||
|
defined in the Header class. Set maxheaderlen to zero to disable
|
||||||
|
header wrapping. The default is 78, as recommended (but not required)
|
||||||
|
by RFC 2822.
|
||||||
|
|
||||||
|
The policy keyword specifies a policy object that controls a number of
|
||||||
|
aspects of the generator's operation. The default policy maintains
|
||||||
|
backward compatibility.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self._fp = outfp
|
||||||
|
self._mangle_from_ = mangle_from_
|
||||||
|
self.maxheaderlen = maxheaderlen
|
||||||
|
self.policy = policy
|
||||||
|
|
||||||
|
def write(self, s):
    """Write *s* verbatim to the underlying output file object."""
    self._fp.write(s)
|
||||||
|
|
||||||
|
def flatten(self, msg, unixfrom=False, linesep=None):
    r"""Print the message object tree rooted at msg to the output file
    specified when the Generator instance was created.

    unixfrom is a flag that forces the printing of a Unix From_ delimiter
    before the first object in the message tree.  If the original message
    has no From_ delimiter, a `standard' one is crafted.  By default, this
    is False to inhibit the printing of any From_ delimiter.

    Note that for subobjects, no From_ line is printed.

    linesep specifies the characters used to indicate a new line in
    the output.  The default value is determined by the policy.
    """
    # We use the _XXX constants for operating on data that comes directly
    # from the msg, and _encoded_XXX constants for operating on data that
    # has already been converted (to bytes in the BytesGenerator) and
    # inserted into a temporary buffer.
    policy = msg.policy if self.policy is None else self.policy
    if linesep is not None:
        policy = policy.clone(linesep=linesep)
    if self.maxheaderlen is not None:
        policy = policy.clone(max_line_length=self.maxheaderlen)
    self._NL = policy.linesep
    self._encoded_NL = self._encode(self._NL)
    self._EMPTY = ''
    # Bug fix: this assignment previously targeted a misspelled attribute
    # ("_encoded_EMTPY"), so _encoded_EMPTY silently fell back to the
    # class-level default.  Assign the encoded empty string under the
    # correct name.
    self._encoded_EMPTY = self._encode(self._EMPTY)
    # Because we use clone (below) when we recursively process message
    # subparts, and because clone uses the computed policy (not None),
    # submessages will automatically get set to the computed policy when
    # they are processed by this code.
    old_gen_policy = self.policy
    old_msg_policy = msg.policy
    try:
        self.policy = policy
        msg.policy = policy
        if unixfrom:
            ufrom = msg.get_unixfrom()
            if not ufrom:
                ufrom = 'From nobody ' + time.ctime(time.time())
            self.write(ufrom + self._NL)
        self._write(msg)
    finally:
        # Always restore the original policies, even if writing failed.
        self.policy = old_gen_policy
        msg.policy = old_msg_policy
|
||||||
|
|
||||||
|
def clone(self, fp):
    """Return a new generator of the same class, writing to *fp*.

    maxheaderlen is passed as None so the (already adjusted) policy
    setting controls line length in the clone.
    """
    cls = self.__class__
    return cls(fp, self._mangle_from_, None, policy=self.policy)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Protected interface - undocumented ;/
|
||||||
|
#
|
||||||
|
|
||||||
|
# Note that we use 'self.write' when what we are writing is coming from
|
||||||
|
# the source, and self._fp.write when what we are writing is coming from a
|
||||||
|
# buffer (because the Bytes subclass has already had a chance to transform
|
||||||
|
# the data in its write method in that case). This is an entirely
|
||||||
|
# pragmatic split determined by experiment; we could be more general by
|
||||||
|
# always using write and having the Bytes subclass write method detect when
|
||||||
|
# it has already transformed the input; but, since this whole thing is a
|
||||||
|
# hack anyway this seems good enough.
|
||||||
|
|
||||||
|
# Similarly, we have _XXX and _encoded_XXX attributes that are used on
|
||||||
|
# source and buffer data, respectively.
|
||||||
|
_encoded_EMPTY = ''
|
||||||
|
|
||||||
|
def _new_buffer(self):
|
||||||
|
# BytesGenerator overrides this to return BytesIO.
|
||||||
|
return StringIO()
|
||||||
|
|
||||||
|
def _encode(self, s):
|
||||||
|
# BytesGenerator overrides this to encode strings to bytes.
|
||||||
|
return s
|
||||||
|
|
||||||
|
def _write_lines(self, lines):
|
||||||
|
# We have to transform the line endings.
|
||||||
|
if not lines:
|
||||||
|
return
|
||||||
|
lines = lines.splitlines(True)
|
||||||
|
for line in lines[:-1]:
|
||||||
|
self.write(line.rstrip('\r\n'))
|
||||||
|
self.write(self._NL)
|
||||||
|
laststripped = lines[-1].rstrip('\r\n')
|
||||||
|
self.write(laststripped)
|
||||||
|
if len(lines[-1]) != len(laststripped):
|
||||||
|
self.write(self._NL)
|
||||||
|
|
||||||
|
def _write(self, msg):
|
||||||
|
# We can't write the headers yet because of the following scenario:
|
||||||
|
# say a multipart message includes the boundary string somewhere in
|
||||||
|
# its body. We'd have to calculate the new boundary /before/ we write
|
||||||
|
# the headers so that we can write the correct Content-Type:
|
||||||
|
# parameter.
|
||||||
|
#
|
||||||
|
# The way we do this, so as to make the _handle_*() methods simpler,
|
||||||
|
# is to cache any subpart writes into a buffer. The we write the
|
||||||
|
# headers and the buffer contents. That way, subpart handlers can
|
||||||
|
# Do The Right Thing, and can still modify the Content-Type: header if
|
||||||
|
# necessary.
|
||||||
|
oldfp = self._fp
|
||||||
|
try:
|
||||||
|
self._fp = sfp = self._new_buffer()
|
||||||
|
self._dispatch(msg)
|
||||||
|
finally:
|
||||||
|
self._fp = oldfp
|
||||||
|
# Write the headers. First we see if the message object wants to
|
||||||
|
# handle that itself. If not, we'll do it generically.
|
||||||
|
meth = getattr(msg, '_write_headers', None)
|
||||||
|
if meth is None:
|
||||||
|
self._write_headers(msg)
|
||||||
|
else:
|
||||||
|
meth(self)
|
||||||
|
self._fp.write(sfp.getvalue())
|
||||||
|
|
||||||
|
def _dispatch(self, msg):
    """Route *msg* to the most specific _handle_* method available.

    Tries _handle_<maintype>_<subtype> first, then _handle_<maintype>,
    and finally falls back to _writeBody.
    """
    main = msg.get_content_maintype()
    sub = msg.get_content_subtype()
    full_name = UNDERSCORE.join((main, sub)).replace('-', '_')
    handler = getattr(self, '_handle_' + full_name, None)
    if handler is None:
        handler = getattr(self, '_handle_' + main.replace('-', '_'), None)
    if handler is None:
        handler = self._writeBody
    handler(msg)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Default handlers
|
||||||
|
#
|
||||||
|
|
||||||
|
def _write_headers(self, msg):
|
||||||
|
for h, v in msg.raw_items():
|
||||||
|
self.write(self.policy.fold(h, v))
|
||||||
|
# A blank line always separates headers from body
|
||||||
|
self.write(self._NL)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Handlers for writing types and subtypes
|
||||||
|
#
|
||||||
|
|
||||||
|
def _handle_text(self, msg):
    """Write a text/* payload, re-encoding surrogate-laden data if possible."""
    body = msg.get_payload()
    if body is None:
        return
    if not isinstance(body, str):
        raise TypeError('string payload expected: %s' % type(body))
    if _has_surrogates(msg._payload):
        charset = msg.get_param('charset')
        if charset is not None:
            # Re-encode under the declared charset; set_payload recomputes
            # the CTE header, so drop the stale one first.
            del msg['content-transfer-encoding']
            msg.set_payload(body, charset)
            body = msg.get_payload()
    if self._mangle_from_:
        body = fcre.sub('>From ', body)
    self._write_lines(body)

# Default body handler
_writeBody = _handle_text
|
||||||
|
|
||||||
|
def _handle_multipart(self, msg):
    """Write a multipart/* message: preamble, boundary-delimited subparts,
    and epilogue.

    Each subpart is flattened into its own buffer first, so that a
    boundary not occurring in any subpart text can be computed before the
    Content-Type header (written later by _write) needs it.
    """
    # The trick here is to write out each part separately, merge them all
    # together, and then make sure that the boundary we've chosen isn't
    # present in the payload.
    msgtexts = []
    subparts = msg.get_payload()
    if subparts is None:
        subparts = []
    elif isinstance(subparts, str):
        # e.g. a non-strict parse of a message with no starting boundary.
        self.write(subparts)
        return
    elif not isinstance(subparts, list):
        # Scalar payload
        subparts = [subparts]
    for part in subparts:
        s = self._new_buffer()
        g = self.clone(s)
        g.flatten(part, unixfrom=False, linesep=self._NL)
        msgtexts.append(s.getvalue())
    # BAW: What about boundaries that are wrapped in double-quotes?
    boundary = msg.get_boundary()
    if not boundary:
        # Create a boundary that doesn't appear in any of the
        # message texts.
        alltext = self._encoded_NL.join(msgtexts)
        boundary = self._make_boundary(alltext)
        msg.set_boundary(boundary)
    # If there's a preamble, write it out, with a trailing CRLF
    if msg.preamble is not None:
        if self._mangle_from_:
            preamble = fcre.sub('>From ', msg.preamble)
        else:
            preamble = msg.preamble
        self._write_lines(preamble)
        self.write(self._NL)
    # dash-boundary transport-padding CRLF
    self.write('--' + boundary + self._NL)
    # body-part
    if msgtexts:
        self._fp.write(msgtexts.pop(0))
    # *encapsulation
    # --> delimiter transport-padding
    # --> CRLF body-part
    for body_part in msgtexts:
        # delimiter transport-padding CRLF
        self.write(self._NL + '--' + boundary + self._NL)
        # body-part
        self._fp.write(body_part)
    # close-delimiter transport-padding
    self.write(self._NL + '--' + boundary + '--')
    if msg.epilogue is not None:
        self.write(self._NL)
        if self._mangle_from_:
            epilogue = fcre.sub('>From ', msg.epilogue)
        else:
            epilogue = msg.epilogue
        self._write_lines(epilogue)
|
||||||
|
|
||||||
|
def _handle_multipart_signed(self, msg):
|
||||||
|
# The contents of signed parts has to stay unmodified in order to keep
|
||||||
|
# the signature intact per RFC1847 2.1, so we disable header wrapping.
|
||||||
|
# RDM: This isn't enough to completely preserve the part, but it helps.
|
||||||
|
p = self.policy
|
||||||
|
self.policy = p.clone(max_line_length=0)
|
||||||
|
try:
|
||||||
|
self._handle_multipart(msg)
|
||||||
|
finally:
|
||||||
|
self.policy = p
|
||||||
|
|
||||||
|
def _handle_message_delivery_status(self, msg):
|
||||||
|
# We can't just write the headers directly to self's file object
|
||||||
|
# because this will leave an extra newline between the last header
|
||||||
|
# block and the boundary. Sigh.
|
||||||
|
blocks = []
|
||||||
|
for part in msg.get_payload():
|
||||||
|
s = self._new_buffer()
|
||||||
|
g = self.clone(s)
|
||||||
|
g.flatten(part, unixfrom=False, linesep=self._NL)
|
||||||
|
text = s.getvalue()
|
||||||
|
lines = text.split(self._encoded_NL)
|
||||||
|
# Strip off the unnecessary trailing empty line
|
||||||
|
if lines and lines[-1] == self._encoded_EMPTY:
|
||||||
|
blocks.append(self._encoded_NL.join(lines[:-1]))
|
||||||
|
else:
|
||||||
|
blocks.append(text)
|
||||||
|
# Now join all the blocks with an empty line. This has the lovely
|
||||||
|
# effect of separating each block with an empty line, but not adding
|
||||||
|
# an extra one after the last one.
|
||||||
|
self._fp.write(self._encoded_NL.join(blocks))
|
||||||
|
|
||||||
|
def _handle_message(self, msg):
|
||||||
|
s = self._new_buffer()
|
||||||
|
g = self.clone(s)
|
||||||
|
# The payload of a message/rfc822 part should be a multipart sequence
|
||||||
|
# of length 1. The zeroth element of the list should be the Message
|
||||||
|
# object for the subpart. Extract that object, stringify it, and
|
||||||
|
# write it out.
|
||||||
|
# Except, it turns out, when it's a string instead, which happens when
|
||||||
|
# and only when HeaderParser is used on a message of mime type
|
||||||
|
# message/rfc822. Such messages are generated by, for example,
|
||||||
|
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
|
||||||
|
# in that case we just emit the string body.
|
||||||
|
payload = msg._payload
|
||||||
|
if isinstance(payload, list):
|
||||||
|
g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
|
||||||
|
payload = s.getvalue()
|
||||||
|
else:
|
||||||
|
payload = self._encode(payload)
|
||||||
|
self._fp.write(payload)
|
||||||
|
|
||||||
|
# This used to be a module level function; we use a classmethod for this
# and _compile_re so we can continue to provide the module level function
# for backward compatibility by doing
#   _make_boudary = Generator._make_boundary
# at the end of the module.  It *is* internal, so we could drop that...
@classmethod
def _make_boundary(cls, text=None):
    """Return a boundary string guaranteed not to occur in *text*."""
    # Start from a random token so boundaries are unlikely to collide.
    token = random.randrange(sys.maxsize)
    boundary = ('=' * 15) + (_fmt % token) + '=='
    if text is None:
        return boundary
    # If the candidate (or a numbered variant) appears in the text as a
    # boundary line, keep bumping the counter until one does not.
    candidate = boundary
    counter = 0
    while True:
        cre = cls._compile_re('^--' + re.escape(candidate) + '(--)?$',
                              re.MULTILINE)
        if not cre.search(text):
            break
        candidate = boundary + '.' + str(counter)
        counter += 1
    return candidate
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _compile_re(cls, s, flags):
|
||||||
|
return re.compile(s, flags)
|
||||||
|
|
||||||
|
class BytesGenerator(Generator):
    """Generates a bytes version of a Message object tree.

    Functionally identical to the base Generator except that the output is
    bytes and not string.  When surrogates were used in the input to encode
    bytes, these are decoded back to bytes for output.  If the policy has
    cte_type set to 7bit, then the message is transformed such that the
    non-ASCII bytes are properly content transfer encoded, using the charset
    unknown-8bit.

    The outfp object must accept bytes in its write method.
    """

    # Bytes versions of this constant for use in manipulating data from
    # the BytesIO buffer.
    _encoded_EMPTY = b''

    def write(self, s):
        # surrogateescape turns surrogate-escaped code points back into the
        # original raw bytes on the way out.
        self._fp.write(str(s).encode('ascii', 'surrogateescape'))

    def _new_buffer(self):
        # Scratch buffers hold bytes here, not text.
        return BytesIO()

    def _encode(self, s):
        return s.encode('ascii')

    def _write_headers(self, msg):
        # This is almost the same as the string version, except for handling
        # strings with 8bit bytes.
        for h, v in msg.raw_items():
            self._fp.write(self.policy.fold_binary(h, v))
        # A blank line always separates headers from body
        self.write(self._NL)

    def _handle_text(self, msg):
        # If the string has surrogates the original source was bytes, so
        # just write it back out.
        if msg._payload is None:
            return
        # NOTE: the condition parses as "and not (cte_type == '7bit')",
        # i.e. the raw-bytes path is taken only when cte_type != 7bit.
        if _has_surrogates(msg._payload) and not self.policy.cte_type=='7bit':
            if self._mangle_from_:
                msg._payload = fcre.sub(">From ", msg._payload)
            self._write_lines(msg._payload)
        else:
            super(BytesGenerator,self)._handle_text(msg)

    # Default body handler
    _writeBody = _handle_text

    @classmethod
    def _compile_re(cls, s, flags):
        # Boundary regexes must match against bytes buffers here.
        return re.compile(s.encode('ascii'), flags)
|
||||||
|
|
||||||
|
|
||||||
|
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'


class DecodedGenerator(Generator):
    """Generates a text representation of a message.

    Like the Generator base class, except that non-text parts are substituted
    with a format string representing the part.
    """
    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
        """Like Generator.__init__() except for one additional optional arg.

        Walks through all subparts of a message.  If the subpart is of main
        type `text', then it prints the decoded payload of the subpart.

        Otherwise, fmt is a format string that is used instead of the message
        payload.  fmt is expanded with the following keywords (in %(keyword)s
        format): type, maintype, subtype, filename, description, encoding.

        The default value for fmt is None, meaning the module-level _FMT
        template is used.
        """
        Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
        self._fmt = _FMT if fmt is None else fmt

    def _dispatch(self, msg):
        # Walk every subpart: multiparts are just containers and are skipped,
        # text parts are printed, everything else prints the template.
        # print(..., file=self) relies on the write() method this class
        # inherits from Generator.
        for part in msg.walk():
            maintype = part.get_content_maintype()
            if maintype == 'multipart':
                # Just skip this
                continue
            if maintype == 'text':
                print(part.get_payload(decode=False), file=self)
                continue
            substitutions = {
                'type'       : part.get_content_type(),
                'maintype'   : part.get_content_maintype(),
                'subtype'    : part.get_content_subtype(),
                'filename'   : part.get_filename('[no filename]'),
                'description': part.get('Content-Description',
                                        '[no description]'),
                'encoding'   : part.get('Content-Transfer-Encoding',
                                        '[no encoding]'),
            }
            print(self._fmt % substitutions, file=self)
|
||||||
|
|
||||||
|
|
||||||
|
# Helpers used by Generator._make_boundary: _width is the digit count of the
# largest representable int, _fmt zero-pads a counter to that width.
_width = len(repr(sys.maxsize-1))
_fmt = '%%0%dd' % _width

# Backward compatibility: older code imported _make_boundary from this module.
_make_boundary = Generator._make_boundary
|
581
venv/Lib/site-packages/future/backports/email/header.py
Normal file
581
venv/Lib/site-packages/future/backports/email/header.py
Normal file
@ -0,0 +1,581 @@
|
|||||||
|
# Copyright (C) 2002-2007 Python Software Foundation
|
||||||
|
# Author: Ben Gertzfield, Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Header encoding and decoding functionality."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import bytes, range, str, super, zip
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Header',
|
||||||
|
'decode_header',
|
||||||
|
'make_header',
|
||||||
|
]
|
||||||
|
|
||||||
|
import re
|
||||||
|
import binascii
|
||||||
|
|
||||||
|
from future.backports import email
|
||||||
|
from future.backports.email import base64mime
|
||||||
|
from future.backports.email.errors import HeaderParseError
|
||||||
|
import future.backports.email.charset as _charset
|
||||||
|
|
||||||
|
# Helpers
|
||||||
|
from future.backports.email.quoprimime import _max_append, header_decode
|
||||||
|
|
||||||
|
# Re-export for convenience; Header accepts Charset instances or names.
Charset = _charset.Charset

# String/byte constants used by the folding and joining logic below.
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78
# Characters that count as folding whitespace (RFC 2822 FWS).
FWS = ' \t'

USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.  (The identifier's spelling is historical and is
# kept for compatibility with code that imports it.)
_embeded_header = re.compile(r'\n[^ \t]+:')
|
||||||
|
|
||||||
|
|
||||||
|
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                    for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        first = True
        while parts:
            unencoded = parts.pop(0)
            if first:
                unencoded = unencoded.lstrip()
                first = False
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                # ecre.split yields (charset, encoding, encoded) groups
                # between unencoded stretches.
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # Now loop over words and remove words that consist of whitespace
    # between two encoded strings.
    # (A stray debug `import sys` that previously sat here was removed;
    # sys was never used in this function.)
    droplist = []
    for n, w in enumerate(words):
        if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace():
            droplist.append(n-1)
    for d in reversed(droplist):
        del words[d]

    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4   # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
|
||||||
|
|
||||||
|
|
||||||
|
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    result = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # None means us-ascii, which h.append() handles directly; anything
        # else is coerced to a Charset instance first.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        result.append(string, charset)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
class Header(object):
    """A MIME-compliant header value that can contain many character sets."""

    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen. For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 78 as recommended
        by RFC 2822.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        self._maxlinelen = maxlinelen
        if header_name is None:
            self._headerlen = 0
        else:
            # Take the separating colon and space into account.
            self._headerlen = len(header_name) + 2

    def __str__(self):
        """Return the string value of the header."""
        self._normalize()
        uchunks = []
        lastcs = None
        lastspace = None
        for string, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            # Don't add a space if the None/us-ascii string already has
            # a space (trailing or leading depending on transition)
            nextcs = charset
            if nextcs == _charset.UNKNOWN8BIT:
                original_bytes = string.encode('ascii', 'surrogateescape')
                string = original_bytes.decode('ascii', 'replace')
            if uchunks:
                hasspace = string and self._nonctext(string[0])
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii') and not hasspace:
                        uchunks.append(SPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii') and not lastspace:
                    uchunks.append(SPACE)
            lastspace = string and self._nonctext(string[-1])
            lastcs = nextcs
            uchunks.append(string)
        return EMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a unicode (of the unencoded header value), swap the
        # args and do another comparison.
        return other == str(self)

    def __ne__(self, other):
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is false), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In either case, when producing an RFC 2822 compliant
        header using RFC 2047 rules, the string will be encoded using the
        output codec of the charset.  If the string cannot be encoded to the
        output codec, a UnicodeError will be raised.

        Optional `errors' is passed as the errors argument to the decode
        call if s is a byte string.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        if not isinstance(s, str):
            input_charset = charset.input_codec or 'us-ascii'
            if input_charset == _charset.UNKNOWN8BIT:
                s = s.decode('us-ascii', 'surrogateescape')
            else:
                s = s.decode(input_charset, errors)
        # Ensure that the bytes we're storing can be decoded to the output
        # character set, otherwise an early error is raised.
        output_charset = charset.output_codec or 'us-ascii'
        if output_charset != _charset.UNKNOWN8BIT:
            try:
                s.encode(output_charset, errors)
            except UnicodeEncodeError:
                if output_charset!='us-ascii':
                    raise
                # us-ascii can't represent the text: silently upgrade the
                # chunk's charset to utf-8.
                charset = UTF8
        self._chunks.append((s, charset))

    def _nonctext(self, s):
        """True if string s is not a ctext character of RFC822.
        """
        return s.isspace() or s in ('(', ')', '\\')

    def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
        r"""Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        Optional maxlinelen specifies the maximum length of each generated
        line, exclusive of the linesep string.  Individual lines may be longer
        than maxlinelen if a folding point cannot be found.  The first line
        will be shorter by the length of the header name plus ": " if a header
        name was specified at Header construction time.  The default value for
        maxlinelen is determined at header construction time.

        Optional splitchars is a string containing characters which should be
        given extra weight by the splitting algorithm during normal header
        wrapping.  This is in very rough support of RFC 2822's `higher level
        syntactic breaks':  split points preceded by a splitchar are preferred
        during line splitting, with the characters preferred in the order in
        which they appear in the string.  Space and tab may be included in the
        string to indicate whether preference should be given to one over the
        other as a split point when other split chars do not appear in the line
        being split.  Splitchars does not affect RFC 2047 encoded lines.

        Optional linesep is a string to be used to separate the lines of
        the value.  The default value is the most useful for typical
        Python applications, but it can be set to \r\n to produce RFC-compliant
        line separators when needed.
        """
        self._normalize()
        if maxlinelen is None:
            maxlinelen = self._maxlinelen
        # A maxlinelen of 0 means don't wrap.  For all practical purposes,
        # choosing a huge number here accomplishes that and makes the
        # _ValueFormatter algorithm much simpler.
        if maxlinelen == 0:
            maxlinelen = 1000000
        formatter = _ValueFormatter(self._headerlen, maxlinelen,
                                    self._continuation_ws, splitchars)
        lastcs = None
        hasspace = lastspace = None
        for string, charset in self._chunks:
            if hasspace is not None:
                hasspace = string and self._nonctext(string[0])
                # (A stray per-iteration debug `import sys` was removed here;
                # sys was never used in this method.)
                if lastcs not in (None, 'us-ascii'):
                    if not hasspace or charset not in (None, 'us-ascii'):
                        formatter.add_transition()
                elif charset not in (None, 'us-ascii') and not lastspace:
                    formatter.add_transition()
            lastspace = string and self._nonctext(string[-1])
            lastcs = charset
            hasspace = False
            lines = string.splitlines()
            if lines:
                formatter.feed('', lines[0], charset)
            else:
                formatter.feed('', '', charset)
            for line in lines[1:]:
                formatter.newline()
                if charset.header_encoding is not None:
                    formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
                                   charset)
                else:
                    sline = line.lstrip()
                    fws = line[:len(line)-len(sline)]
                    formatter.feed(fws, sline, charset)
            if len(lines) > 1:
                formatter.newline()
        if self._chunks:
            formatter.add_transition()
        value = formatter._str(linesep)
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                                   "an embedded header: {!r}".format(value))
        return value

    def _normalize(self):
        # Step 1: Normalize the chunks so that all runs of identical charsets
        # get collapsed into a single unicode string.
        chunks = []
        last_charset = None
        last_chunk = []
        for string, charset in self._chunks:
            if charset == last_charset:
                last_chunk.append(string)
            else:
                if last_charset is not None:
                    chunks.append((SPACE.join(last_chunk), last_charset))
                last_chunk = [string]
                last_charset = charset
        if last_chunk:
            chunks.append((SPACE.join(last_chunk), last_charset))
        self._chunks = chunks
|
||||||
|
|
||||||
|
|
||||||
|
class _ValueFormatter(object):
    """Folds a header value into lines of at most maxlen characters.

    Accumulates (fws, string) parts in a _Accumulator for the line being
    built and flushes completed lines into self._lines.  The splitting logic
    is order-sensitive; see _append_chunk.
    """

    def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
        # headerlen: characters already consumed on the first line by the
        # field name and ": ".
        self._maxlen = maxlen
        self._continuation_ws = continuation_ws
        self._continuation_ws_len = len(continuation_ws)
        self._splitchars = splitchars
        self._lines = []
        self._current_line = _Accumulator(headerlen)

    def _str(self, linesep):
        """Flush the pending line and join all lines with *linesep*."""
        self.newline()
        return linesep.join(self._lines)

    def __str__(self):
        return self._str(NL)

    def newline(self):
        """Finish the current line and start a new (empty) one."""
        # Drop a trailing bare transition marker (' ', '') if present.
        end_of_line = self._current_line.pop()
        if end_of_line != (' ', ''):
            self._current_line.push(*end_of_line)
        if len(self._current_line) > 0:
            if self._current_line.is_onlyws():
                # Pure whitespace belongs to the previous line, not a new one.
                self._lines[-1] += str(self._current_line)
            else:
                self._lines.append(str(self._current_line))
        self._current_line.reset()

    def add_transition(self):
        # Marks a charset boundary; collapsed to a single space if unused.
        self._current_line.push(' ', '')

    def feed(self, fws, string, charset):
        """Add *string* (preceded by folding whitespace *fws*) to the value."""
        # If the charset has no header encoding (i.e. it is an ASCII encoding)
        # then we must split the header at the "highest level syntactic break"
        # possible.  Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.  Eventually, this should be pluggable.
        if charset.header_encoding is None:
            self._ascii_split(fws, string, self._splitchars)
            return
        # Otherwise, we're doing either a Base64 or a quoted-printable
        # encoding which means we don't need to split the line on syntactic
        # breaks.  We can basically just find enough characters to fit on the
        # current line, minus the RFC 2047 chrome.  What makes this trickier
        # though is that we have to split at octet boundaries, not character
        # boundaries but it's only safe to split at character boundaries so at
        # best we can only get close.
        encoded_lines = charset.header_encode_lines(string, self._maxlengths())
        # The first element extends the current line, but if it's None then
        # nothing more fit on the current line so start a new line.
        try:
            first_line = encoded_lines.pop(0)
        except IndexError:
            # There are no encoded lines, so we're done.
            return
        if first_line is not None:
            self._append_chunk(fws, first_line)
        try:
            last_line = encoded_lines.pop()
        except IndexError:
            # There was only one line.
            return
        self.newline()
        self._current_line.push(self._continuation_ws, last_line)
        # Everything else are full lines in themselves.
        for line in encoded_lines:
            self._lines.append(self._continuation_ws + line)

    def _maxlengths(self):
        """Yield the budget for each successive encoded line."""
        # The first line's length.
        yield self._maxlen - len(self._current_line)
        while True:
            yield self._maxlen - self._continuation_ws_len

    def _ascii_split(self, fws, string, splitchars):
        # The RFC 2822 header folding algorithm is simple in principle but
        # complex in practice.  Lines may be folded any place where "folding
        # white space" appears by inserting a linesep character in front of the
        # FWS.  The complication is that not all spaces or tabs qualify as FWS,
        # and we are also supposed to prefer to break at "higher level
        # syntactic breaks".  We can't do either of these without intimate
        # knowledge of the structure of structured headers, which we don't have
        # here.  So the best we can do here is prefer to break at the specified
        # splitchars, and hope that we don't choose any spaces or tabs that
        # aren't legal FWS.  (This is at least better than the old algorithm,
        # where we would sometimes *introduce* FWS after a splitchar, or the
        # algorithm before that, where we would turn all white space runs into
        # single spaces or tabs.)
        parts = re.split("(["+FWS+"]+)", fws+string)
        if parts[0]:
            parts[:0] = ['']
        else:
            parts.pop(0)
        # Pair up (fws, part) from the alternating split output.
        for fws, part in zip(*[iter(parts)]*2):
            self._append_chunk(fws, part)

    def _append_chunk(self, fws, string):
        self._current_line.push(fws, string)
        if len(self._current_line) > self._maxlen:
            # Find the best split point, working backward from the end.
            # There might be none, on a long first line.
            for ch in self._splitchars:
                for i in range(self._current_line.part_count()-1, 0, -1):
                    if ch.isspace():
                        fws = self._current_line[i][0]
                        if fws and fws[0]==ch:
                            break
                    prevpart = self._current_line[i-1][1]
                    if prevpart and prevpart[-1]==ch:
                        break
                else:
                    continue
                break
            else:
                # No split point found for any splitchar.
                fws, part = self._current_line.pop()
                if self._current_line._initial_size > 0:
                    # There will be a header, so leave it on a line by itself.
                    self.newline()
                    if not fws:
                        # We don't use continuation_ws here because the whitespace
                        # after a header should always be a space.
                        fws = ' '
                self._current_line.push(fws, part)
                return
            remainder = self._current_line.pop_from(i)
            self._lines.append(str(self._current_line))
            self._current_line.reset(remainder)
|
||||||
|
|
||||||
|
|
||||||
|
class _Accumulator(list):
    """A list of (fws, string) pairs making up the line being built.

    len() reports the rendered character length (including an optional
    initial_size offset for the header name on the first line), while
    part_count() reports the number of stored pairs.
    """

    def __init__(self, initial_size=0):
        self._initial_size = initial_size
        super().__init__()

    def push(self, fws, string):
        self.append((fws, string))

    def pop_from(self, i=0):
        """Remove and return all parts from index *i* onward."""
        tail = self[i:]
        del self[i:]
        return tail

    def pop(self):
        # An empty accumulator pops a neutral pair rather than raising.
        if self.part_count() == 0:
            return ('', '')
        return super().pop()

    def __len__(self):
        total = self._initial_size
        for fws, part in self:
            total += len(fws) + len(part)
        return total

    def __str__(self):
        pieces = [fws + part for fws, part in self]
        return EMPTYSTRING.join(pieces)

    def reset(self, startval=None):
        self[:] = [] if startval is None else startval
        self._initial_size = 0

    def is_onlyws(self):
        """True when no header offset is in play and content is whitespace."""
        if self._initial_size != 0:
            return False
        return not self or str(self).isspace()

    def part_count(self):
        return super().__len__()
|
592
venv/Lib/site-packages/future/backports/email/headerregistry.py
Normal file
592
venv/Lib/site-packages/future/backports/email/headerregistry.py
Normal file
@ -0,0 +1,592 @@
|
|||||||
|
"""Representing and manipulating email headers via custom objects.
|
||||||
|
|
||||||
|
This module provides an implementation of the HeaderRegistry API.
|
||||||
|
The implementation is designed to flexibly follow RFC5322 rules.
|
||||||
|
|
||||||
|
Eventually HeaderRegistry will be a public API, but it isn't yet,
|
||||||
|
and will probably change some before that happens.
|
||||||
|
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
from future.builtins import super
|
||||||
|
from future.builtins import str
|
||||||
|
from future.utils import text_to_native_str
|
||||||
|
from future.backports.email import utils
|
||||||
|
from future.backports.email import errors
|
||||||
|
from future.backports.email import _header_value_parser as parser
|
||||||
|
|
||||||
|
class Address(object):

    def __init__(self, display_name='', username='', domain='', addr_spec=None):
        """Create an object represeting a full email address.

        An address can have a 'display_name', a 'username', and a 'domain'.  In
        addition to specifying the username and domain separately, they may be
        specified together by using the addr_spec keyword *instead of* the
        username and domain keywords.  If an addr_spec string is specified it
        must be properly quoted according to RFC 5322 rules; an error will be
        raised if it is not.

        An Address object has display_name, username, domain, and addr_spec
        attributes, all of which are read-only.  The addr_spec and the string
        value of the object are both quoted according to RFC5322 rules, but
        without any Content Transfer Encoding.
        """
        # This clause with its potential 'raise' may only happen when an
        # application program creates an Address object using an addr_spec
        # keyword.  The email library code itself must always supply username
        # and domain.
        if addr_spec is not None:
            if username or domain:
                raise TypeError("addrspec specified when username and/or "
                                "domain also specified")
            a_s, rest = parser.get_addr_spec(addr_spec)
            if rest:
                raise ValueError("Invalid addr_spec; only '{}' "
                                 "could be parsed from '{}'".format(
                                    a_s, addr_spec))
            if a_s.all_defects:
                raise a_s.all_defects[0]
            username = a_s.local_part
            domain = a_s.domain
        self._display_name = display_name
        self._username = username
        self._domain = domain

    @property
    def display_name(self):
        return self._display_name

    @property
    def username(self):
        return self._username

    @property
    def domain(self):
        return self._domain

    @property
    def addr_spec(self):
        """The addr_spec (username@domain) portion of the address, quoted
        according to RFC 5322 rules, but with no Content Transfer Encoding.
        """
        # Quote the local part only when it contains characters outside the
        # dot-atom set.
        chars = set(self.username)
        needs_quoting = len(chars) > len(chars - parser.DOT_ATOM_ENDS)
        local = parser.quote_string(self.username) if needs_quoting else self.username
        if self.domain:
            return local + '@' + self.domain
        if not local:
            return '<>'
        return local

    def __repr__(self):
        return "Address(display_name={!r}, username={!r}, domain={!r})".format(
            self.display_name, self.username, self.domain)

    def __str__(self):
        # Quote the display name only when it contains specials.
        chars = set(self.display_name)
        needs_quoting = len(chars) > len(chars - parser.SPECIALS)
        shown = parser.quote_string(self.display_name) if needs_quoting else self.display_name
        if shown:
            addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
            return "{} <{}>".format(shown, addr_spec)
        return self.addr_spec

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        if self.display_name != other.display_name:
            return False
        return (self.username == other.username and
                self.domain == other.domain)
|
||||||
|
|
||||||
|
|
||||||
|
class Group(object):
    """An RFC 5322 address group, or a wrapper around a single Address."""

    def __init__(self, display_name=None, addresses=None):
        """Create an object representing an address group.

        An address group consists of a display_name followed by colon and an
        list of addresses (see Address) terminated by a semi-colon.  The Group
        is created by specifying a display_name and a possibly empty list of
        Address objects.  A Group can also be used to represent a single
        address that is not in a group, which is convenient when manipulating
        lists that are a combination of Groups and individual Addresses.  In
        this case the display_name should be set to None.  In particular, the
        string representation of a Group whose display_name is None is the same
        as the Address object, if there is one and only one Address object in
        the addresses list.

        """
        self._display_name = display_name
        # Freeze the address list so the Group is immutable.
        self._addresses = tuple(addresses) if addresses else tuple()

    @property
    def display_name(self):
        return self._display_name

    @property
    def addresses(self):
        # Tuple of Address objects (possibly empty).
        return self._addresses

    def __repr__(self):
        # Bug fix: the format string was missing its closing ')', producing
        # an unbalanced repr such as "Group(display_name=None, addresses=()".
        return "Group(display_name={!r}, addresses={!r})".format(
            self.display_name, self.addresses)

    def __str__(self):
        if self.display_name is None and len(self.addresses)==1:
            # A bare single address: render it without group syntax.
            return str(self.addresses[0])
        disp = self.display_name
        if disp is not None:
            # Quote the display name if it contains any RFC 5322 specials.
            nameset = set(disp)
            if len(nameset) > len(nameset-parser.SPECIALS):
                disp = parser.quote_string(disp)
        adrstr = ", ".join(str(x) for x in self.addresses)
        adrstr = ' ' + adrstr if adrstr else adrstr
        return "{}:{};".format(disp, adrstr)

    def __eq__(self, other):
        # Strict type equality: subclasses never compare equal.
        if type(other) != type(self):
            return False
        return (self.display_name == other.display_name and
                self.addresses == other.addresses)
|
||||||
|
|
||||||
|
|
||||||
|
# Header Classes #
|
||||||
|
|
||||||
|
class BaseHeader(str):

    """Base class for message headers.

    Implements generic behavior and provides tools for subclasses.

    A subclass must define a classmethod named 'parse' that takes an unfolded
    value string and a dictionary as its arguments.  The dictionary will
    contain one key, 'defects', initialized to an empty list.  After the call
    the dictionary must contain two additional keys: parse_tree, set to the
    parse tree obtained from parsing the header, and 'decoded', set to the
    string value of the idealized representation of the data from the value.
    (That is, encoded words are decoded, and values that have canonical
    representations are so represented.)

    The defects key is intended to collect parsing defects, which the message
    parser will subsequently dispose of as appropriate.  The parser should not,
    insofar as practical, raise any errors.  Defects should be added to the
    list instead.  The standard header parsers register defects for RFC
    compliance issues, for obsolete RFC syntax, and for unrecoverable parsing
    errors.

    The parse method may add additional keys to the dictionary.  In this case
    the subclass must define an 'init' method, which will be passed the
    dictionary as its keyword arguments.  The method should use (usually by
    setting them as the value of similarly named attributes) and remove all the
    extra keys added by its parse method, and then use super to call its parent
    class with the remaining arguments and keywords.

    The subclass should also make sure that a 'max_count' attribute is defined
    that is either None or 1.  XXX: need to better define this API.

    """

    def __new__(cls, name, value):
        # The subclass's parse() fills in 'parse_tree' and 'decoded' and
        # appends any parsing problems to 'defects'.
        kwds = {'defects': []}
        cls.parse(value, kwds)
        if utils._has_surrogates(kwds['decoded']):
            # Strip surrogate-escaped (undecodable) bytes from the str value.
            kwds['decoded'] = utils._sanitize(kwds['decoded'])
        self = str.__new__(cls, kwds['decoded'])
        # del kwds['decoded']
        # NOTE(review): 'decoded' is deliberately(?) left in kwds here and
        # silently ignored by init() below -- confirm against upstream, which
        # deletes it before the init() call.
        self.init(name, **kwds)
        return self

    def init(self, name, **_3to2kwargs):
        # The _3to2kwargs dance emulates Python 3 keyword-only arguments in
        # Python 2 compatible syntax (produced by the 3to2 translator).
        defects = _3to2kwargs['defects']; del _3to2kwargs['defects']
        parse_tree = _3to2kwargs['parse_tree']; del _3to2kwargs['parse_tree']
        self._name = name
        self._parse_tree = parse_tree
        self._defects = defects

    @property
    def name(self):
        # The header field name as given to the constructor.
        return self._name

    @property
    def defects(self):
        # Immutable snapshot of the defects recorded while parsing the value.
        return tuple(self._defects)

    def __reduce__(self):
        # Pickle support: the concrete header classes are created dynamically
        # (see HeaderRegistry.__getitem__), so they cannot be located by name;
        # _reconstruct_header rebuilds an equivalent class at unpickle time.
        return (
            _reconstruct_header,
            (
                self.__class__.__name__,
                self.__class__.__bases__,
                str(self),
            ),
            self.__dict__)

    @classmethod
    def _reconstruct(cls, value):
        # Rebuild the str value directly, bypassing __new__'s re-parsing.
        return str.__new__(cls, value)

    def fold(self, **_3to2kwargs):
        policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
        """Fold header according to policy.

        The parsed representation of the header is folded according to
        RFC5322 rules, as modified by the policy.  If the parse tree
        contains surrogateescaped bytes, the bytes are CTE encoded using
        the charset 'unknown-8bit".

        Any non-ASCII characters in the parse tree are CTE encoded using
        charset utf-8. XXX: make this a policy setting.

        The returned value is an ASCII-only string possibly containing linesep
        characters, and ending with a linesep character.  The string includes
        the header name and the ': ' separator.

        """
        # Build a complete Header token (name, ':', space, value tree) and
        # let the parser's folding machinery do the line wrapping.
        # At some point we need to only put fws here if it was in the source.
        header = parser.Header([
            parser.HeaderLabel([
                parser.ValueTerminal(self.name, 'header-name'),
                parser.ValueTerminal(':', 'header-sep')]),
            parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]),
            self._parse_tree])
        return header.fold(policy=policy)
|
||||||
|
|
||||||
|
|
||||||
|
def _reconstruct_header(cls_name, bases, value):
    # Unpickling helper for BaseHeader.__reduce__: recreate the dynamically
    # built header class and restore the string value without re-parsing.
    # text_to_native_str keeps the class name a native str on Python 2.
    return type(text_to_native_str(cls_name), bases, {})._reconstruct(value)
|
||||||
|
|
||||||
|
|
||||||
|
class UnstructuredHeader(object):
    """Header type for RFC 5322 'unstructured' values (the default)."""

    # No limit on how many times this header may appear in a message.
    max_count = None
    value_parser = staticmethod(parser.get_unstructured)

    @classmethod
    def parse(cls, value, kwds):
        # Record both the parse tree and its idealized string form.
        parse_tree = cls.value_parser(value)
        kwds['parse_tree'] = parse_tree
        kwds['decoded'] = str(parse_tree)
|
||||||
|
|
||||||
|
|
||||||
|
class UniqueUnstructuredHeader(UnstructuredHeader):

    # Unstructured header that may appear at most once in a message.
    max_count = 1
|
||||||
|
|
||||||
|
|
||||||
|
class DateHeader(object):

    """Header whose value consists of a single timestamp.

    Provides an additional attribute, datetime, which is either an aware
    datetime using a timezone, or a naive datetime if the timezone
    in the input string is -0000.  Also accepts a datetime as input.
    The 'value' attribute is the normalized form of the timestamp,
    which means it is the output of format_datetime on the datetime.
    """

    max_count = None

    # This is used only for folding, not for creating 'decoded'.
    value_parser = staticmethod(parser.get_unstructured)

    @classmethod
    def parse(cls, value, kwds):
        if not value:
            # An empty Date value is recorded as a defect, not raised; the
            # header yields an empty string and a None datetime.
            kwds['defects'].append(errors.HeaderMissingRequiredValue())
            kwds['datetime'] = None
            kwds['decoded'] = ''
            kwds['parse_tree'] = parser.TokenList()
            return
        if isinstance(value, str):
            # A string input is parsed; a datetime input is used as-is.
            value = utils.parsedate_to_datetime(value)
        kwds['datetime'] = value
        # 'decoded' is the normalized RFC 5322 rendering of the datetime.
        kwds['decoded'] = utils.format_datetime(kwds['datetime'])
        kwds['parse_tree'] = cls.value_parser(kwds['decoded'])

    def init(self, *args, **kw):
        # Consume the extra 'datetime' key added by parse() above.
        self._datetime = kw.pop('datetime')
        super().init(*args, **kw)

    @property
    def datetime(self):
        # The parsed timestamp, or None if the header value was empty.
        return self._datetime
|
||||||
|
|
||||||
|
|
||||||
|
class UniqueDateHeader(DateHeader):

    # Date header that may appear at most once in a message.
    max_count = 1
|
||||||
|
|
||||||
|
|
||||||
|
class AddressHeader(object):
    """Header whose value is a list of addresses and/or groups."""

    max_count = None

    @staticmethod
    def value_parser(value):
        address_list, value = parser.get_address_list(value)
        # get_address_list consumes the whole string; leftover text would
        # indicate a parser bug.
        assert not value, 'this should not happen'
        return address_list

    @classmethod
    def parse(cls, value, kwds):
        if isinstance(value, str):
            # We are translating here from the RFC language (address/mailbox)
            # to our API language (group/address).
            kwds['parse_tree'] = address_list = cls.value_parser(value)
            groups = []
            for addr in address_list.addresses:
                # Each RFC 'address' becomes a Group; a bare mailbox is a
                # Group whose display_name is None.
                groups.append(Group(addr.display_name,
                                    [Address(mb.display_name or '',
                                             mb.local_part or '',
                                             mb.domain or '')
                                     for mb in addr.all_mailboxes]))
            defects = list(address_list.all_defects)
        else:
            # Assume it is Address/Group stuff
            if not hasattr(value, '__iter__'):
                value = [value]
            # Anything without an 'addresses' attribute is treated as a
            # single Address and wrapped in an anonymous Group.
            groups = [Group(None, [item]) if not hasattr(item, 'addresses')
                      else item
                      for item in value]
            defects = []
        kwds['groups'] = groups
        kwds['defects'] = defects
        kwds['decoded'] = ', '.join([str(item) for item in groups])
        if 'parse_tree' not in kwds:
            # Objects were supplied rather than a string: derive the parse
            # tree from the normalized string form.
            kwds['parse_tree'] = cls.value_parser(kwds['decoded'])

    def init(self, *args, **kw):
        self._groups = tuple(kw.pop('groups'))
        # The flattened address tuple is computed lazily; see 'addresses'.
        self._addresses = None
        super().init(*args, **kw)

    @property
    def groups(self):
        # Tuple of Group objects (single addresses appear as anonymous
        # Groups).
        return self._groups

    @property
    def addresses(self):
        # All Address objects across all groups, flattened; cached on first
        # access.
        if self._addresses is None:
            self._addresses = tuple([address for group in self._groups
                                     for address in group.addresses])
        return self._addresses
|
||||||
|
|
||||||
|
|
||||||
|
class UniqueAddressHeader(AddressHeader):

    # Address header that may appear at most once in a message.
    max_count = 1
|
||||||
|
|
||||||
|
|
||||||
|
class SingleAddressHeader(AddressHeader):
    """Address header expected to carry exactly one address."""

    @property
    def address(self):
        # The sole Address; any other count is an error.
        addresses = self.addresses
        if len(addresses) == 1:
            return addresses[0]
        raise ValueError(("value of single address header {} is not "
                          "a single address").format(self.name))
|
||||||
|
|
||||||
|
|
||||||
|
class UniqueSingleAddressHeader(SingleAddressHeader):

    # Single-address header that may appear at most once in a message.
    max_count = 1
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEVersionHeader(object):
    """Header holding a MIME version number (normally '1.0')."""

    max_count = 1

    value_parser = staticmethod(parser.parse_mime_version)

    @classmethod
    def parse(cls, value, kwds):
        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
        kwds['decoded'] = str(parse_tree)
        kwds['defects'].extend(parse_tree.all_defects)
        # Without a minor number the version is unusable, so major is
        # reported as None as well.
        kwds['major'] = None if parse_tree.minor is None else parse_tree.major
        kwds['minor'] = parse_tree.minor
        if parse_tree.minor is not None:
            kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor'])
        else:
            kwds['version'] = None

    def init(self, *args, **kw):
        # Consume the extra keys added by parse() above.
        self._version = kw.pop('version')
        self._major = kw.pop('major')
        self._minor = kw.pop('minor')
        super().init(*args, **kw)

    @property
    def major(self):
        # Major version number, or None if the value could not be parsed.
        return self._major

    @property
    def minor(self):
        # Minor version number, or None if the value could not be parsed.
        return self._minor

    @property
    def version(self):
        # Normalized 'major.minor' string, or None if unparseable.
        return self._version
|
||||||
|
|
||||||
|
|
||||||
|
class ParameterizedMIMEHeader(object):

    # Mixin that handles the params dict.  Must be subclassed and
    # a property value_parser for the specific header provided.

    max_count = 1

    @classmethod
    def parse(cls, value, kwds):
        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
        kwds['decoded'] = str(parse_tree)
        kwds['defects'].extend(parse_tree.all_defects)
        if parse_tree.params is None:
            kwds['params'] = {}
        else:
            # The MIME RFCs specify that parameter ordering is arbitrary.
            # Normalize parameter names to lowercase and strip undecodable
            # bytes from names and values.
            kwds['params'] = dict((utils._sanitize(name).lower(),
                                   utils._sanitize(value))
                                  for name, value in parse_tree.params)

    def init(self, *args, **kw):
        # Consume the extra 'params' key added by parse() above.
        self._params = kw.pop('params')
        super().init(*args, **kw)

    @property
    def params(self):
        # Return a copy so callers cannot mutate the header's state.
        return self._params.copy()
|
||||||
|
|
||||||
|
|
||||||
|
class ContentTypeHeader(ParameterizedMIMEHeader):
    """Content-Type header: maintype/subtype plus parameters."""

    value_parser = staticmethod(parser.parse_content_type_header)

    def init(self, *args, **kw):
        super().init(*args, **kw)
        # Sanitize to strip any surrogate-escaped (undecodable) bytes.
        self._maintype = utils._sanitize(self._parse_tree.maintype)
        self._subtype = utils._sanitize(self._parse_tree.subtype)

    @property
    def maintype(self):
        return self._maintype

    @property
    def subtype(self):
        return self._subtype

    @property
    def content_type(self):
        # The full MIME type as 'maintype/subtype'.
        return self.maintype + '/' + self.subtype
|
||||||
|
|
||||||
|
|
||||||
|
class ContentDispositionHeader(ParameterizedMIMEHeader):
    """Content-Disposition header: disposition token plus parameters."""

    value_parser = staticmethod(parser.parse_content_disposition_header)

    def init(self, *args, **kw):
        super().init(*args, **kw)
        # cd is None when the value could not be parsed; otherwise strip
        # any surrogate-escaped bytes.
        cd = self._parse_tree.content_disposition
        self._content_disposition = cd if cd is None else utils._sanitize(cd)

    @property
    def content_disposition(self):
        # The disposition token (e.g. 'inline', 'attachment'), or None.
        return self._content_disposition
|
||||||
|
|
||||||
|
|
||||||
|
class ContentTransferEncodingHeader(object):
    """Content-Transfer-Encoding header."""

    max_count = 1

    value_parser = staticmethod(parser.parse_content_transfer_encoding_header)

    @classmethod
    def parse(cls, value, kwds):
        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
        kwds['decoded'] = str(parse_tree)
        kwds['defects'].extend(parse_tree.all_defects)

    def init(self, *args, **kw):
        super().init(*args, **kw)
        # Sanitize to strip any surrogate-escaped (undecodable) bytes.
        self._cte = utils._sanitize(self._parse_tree.cte)

    @property
    def cte(self):
        # The encoding token (e.g. 'base64', 'quoted-printable').
        return self._cte
|
||||||
|
|
||||||
|
|
||||||
|
# The header factory #
|
||||||
|
|
||||||
|
# Default mapping of lower-cased header names to their specialized header
# classes.  Names not listed here are handled by HeaderRegistry's
# default_class (UnstructuredHeader unless overridden).
_default_header_map = {
    'subject':                      UniqueUnstructuredHeader,
    'date':                         UniqueDateHeader,
    'resent-date':                  DateHeader,
    'orig-date':                    UniqueDateHeader,
    'sender':                       UniqueSingleAddressHeader,
    'resent-sender':                SingleAddressHeader,
    'to':                           UniqueAddressHeader,
    'resent-to':                    AddressHeader,
    'cc':                           UniqueAddressHeader,
    'resent-cc':                    AddressHeader,
    'bcc':                          UniqueAddressHeader,
    'resent-bcc':                   AddressHeader,
    'from':                         UniqueAddressHeader,
    'resent-from':                  AddressHeader,
    'reply-to':                     UniqueAddressHeader,
    'mime-version':                 MIMEVersionHeader,
    'content-type':                 ContentTypeHeader,
    'content-disposition':          ContentDispositionHeader,
    'content-transfer-encoding':    ContentTransferEncodingHeader,
    }
|
||||||
|
|
||||||
|
class HeaderRegistry(object):

    """A header_factory and header registry."""

    def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
                 use_default_map=True):
        """Create a header_factory that works with the Policy API.

        base_class is the class that will be the last class in the created
        header class's __bases__ list.  default_class is the class that will be
        used if "name" (see __call__) does not appear in the registry.
        use_default_map controls whether or not the default mapping of names to
        specialized classes is copied in to the registry when the factory is
        created.  The default is True.

        """
        self.registry = {}
        self.base_class = base_class
        self.default_class = default_class
        if use_default_map:
            self.registry.update(_default_header_map)

    def map_to_type(self, name, cls):
        """Register cls as the specialized class for handling "name" headers.

        """
        # Lookups are case-insensitive: keys are stored lowercase.
        self.registry[name.lower()] = cls

    def __getitem__(self, name):
        # Build a concrete header class on each access, combining the
        # specialized parser mixin with base_class.  text_to_native_str keeps
        # the class name a native str on Python 2.
        cls = self.registry.get(name.lower(), self.default_class)
        return type(text_to_native_str('_'+cls.__name__), (cls, self.base_class), {})

    def __call__(self, name, value):
        """Create a header instance for header 'name' from 'value'.

        Creates a header instance by creating a specialized class for parsing
        and representing the specified header by combining the factory
        base_class with a specialized class from the registry or the
        default_class, and passing the name and value to the constructed
        class's constructor.

        """
        return self[name](name, value)
|
74
venv/Lib/site-packages/future/backports/email/iterators.py
Normal file
74
venv/Lib/site-packages/future/backports/email/iterators.py
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Various types of useful iterators and generators."""
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'body_line_iterator',
|
||||||
|
'typed_subpart_iterator',
|
||||||
|
'walk',
|
||||||
|
# Do not include _structure() since it's part of the debugging API.
|
||||||
|
]
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from io import StringIO
|
||||||
|
|
||||||
|
|
||||||
|
# This function will become a method of the Message class
|
||||||
|
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order.  This method is a
    generator.
    """
    yield self
    if not self.is_multipart():
        return
    for part in self.get_payload():
        for descendant in part.walk():
            yield descendant
|
||||||
|
|
||||||
|
|
||||||
|
# These two functions are imported into the Iterators.py interface module.
|
||||||
|
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    for part in msg.walk():
        payload = part.get_payload(decode=decode)
        # Non-string payloads (multiparts, decoded bytes, None) are skipped.
        if not isinstance(payload, str):
            continue
        for line in StringIO(payload):
            yield line
|
||||||
|
|
||||||
|
|
||||||
|
def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text".  Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for part in msg.walk():
        if part.get_content_maintype() != maintype:
            continue
        if subtype is None or part.get_content_subtype() == subtype:
            yield part
|
||||||
|
|
||||||
|
|
||||||
|
def _structure(msg, fp=None, level=0, include_default=False):
    """A handy debugging aid"""
    if fp is None:
        fp = sys.stdout
    # Indent four spaces per nesting level.
    indent = ' ' * (level * 4)
    print(indent + msg.get_content_type(), end='', file=fp)
    if include_default:
        print(' [%s]' % msg.get_default_type(), file=fp)
    else:
        print(file=fp)
    if not msg.is_multipart():
        return
    for part in msg.get_payload():
        _structure(part, fp, level + 1, include_default)
|
882
venv/Lib/site-packages/future/backports/email/message.py
Normal file
882
venv/Lib/site-packages/future/backports/email/message.py
Normal file
@ -0,0 +1,882 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright (C) 2001-2007 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Basic message object for the email package object model."""
|
||||||
|
from __future__ import absolute_import, division, unicode_literals
|
||||||
|
from future.builtins import list, range, str, zip
|
||||||
|
|
||||||
|
__all__ = ['Message']
|
||||||
|
|
||||||
|
import re
|
||||||
|
import uu
|
||||||
|
import base64
|
||||||
|
import binascii
|
||||||
|
from io import BytesIO, StringIO
|
||||||
|
|
||||||
|
# Intrapackage imports
|
||||||
|
from future.utils import as_native_str
|
||||||
|
from future.backports.email import utils
|
||||||
|
from future.backports.email import errors
|
||||||
|
from future.backports.email._policybase import compat32
|
||||||
|
from future.backports.email import charset as _charset
|
||||||
|
from future.backports.email._encoded_words import decode_b
|
||||||
|
Charset = _charset.Charset
|
||||||
|
|
||||||
|
SEMISPACE = '; '
|
||||||
|
|
||||||
|
# Regular expression that matches `special' characters in parameters, the
|
||||||
|
# existence of which force quoting of the parameter value.
|
||||||
|
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
|
||||||
|
|
||||||
|
|
||||||
|
def _splitparam(param):
    # Split header parameters.  BAW: this may be too simple.  It isn't
    # strictly RFC 2045 (section 5.1) compliant, but it catches most headers
    # found in the wild.  We may eventually need a full fledged parser.
    # RDM: we might have a Header here; for now just stringify it.
    head, sep, tail = str(param).partition(';')
    if sep:
        return head.strip(), tail.strip()
    # No ';' present: the whole value is the first component.
    return head.strip(), None
|
||||||
|
|
||||||
|
def _formatparam(param, value=None, quote=True):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true.  If value is a
    three tuple (charset, language, value), it will be encoded according
    to RFC2231 rules.  If it contains non-ascii characters it will likewise
    be encoded according to RFC2231 rules, using the utf-8 charset and
    a null language.
    """
    if value is not None and len(value) > 0:
        # A tuple is used for RFC 2231 encoded parameter values where items
        # are (charset, language, value).  charset is a string, not a Charset
        # instance.  RFC 2231 encoded values are never quoted, per RFC.
        if isinstance(value, tuple):
            # Encode as per RFC 2231
            param += '*'
            value = utils.encode_rfc2231(value[2], value[0], value[1])
            return '%s=%s' % (param, value)
        else:
            try:
                value.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII string value: fall back to RFC 2231 encoding
                # with the utf-8 charset and a null language.
                param += '*'
                value = utils.encode_rfc2231(value, 'utf-8', '')
                return '%s=%s' % (param, value)
        # BAW: Please check this.  I think that if quote is set it should
        # force quoting even if not necessary.
        # NOTE(review): since 'quote' defaults to True, the tspecials search
        # below is normally short-circuited and ASCII values are always
        # quoted -- confirm this is the intended behavior.
        if quote or tspecials.search(value):
            return '%s="%s"' % (param, utils.quote(value))
        else:
            return '%s=%s' % (param, value)
    else:
        # No (or empty) value: emit the bare parameter name.
        return param
|
||||||
|
|
||||||
|
def _parseparam(s):
    # Split a parameter string on ';' into a list of 'name=value' (or bare
    # token) strings, honoring ';' characters inside double-quoted values.
    # RDM This might be a Header, so for now stringify it.
    s = ';' + str(s)
    plist = []
    while s[:1] == ';':
        s = s[1:]
        end = s.find(';')
        # Advance past any ';' that falls inside a quoted string: keep
        # moving while the count of unescaped '"' before 'end' is odd.
        while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
            end = s.find(';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        if '=' in f:
            # Lowercase the parameter name; leave the value untouched.
            i = f.index('=')
            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
        plist.append(f.strip())
        s = s[end:]
    return plist
|
||||||
|
|
||||||
|
|
||||||
|
def _unquotevalue(value):
    # This is different than utils.collapse_rfc2231_value() because it doesn't
    # try to convert the value to a unicode.  Message.get_param() and
    # Message.get_params() are both currently defined to return the tuple in
    # the face of RFC 2231 parameters.
    if not isinstance(value, tuple):
        return utils.unquote(value)
    # RFC 2231 triple: only the value component gets unquoted.
    charset, language, quoted = value
    return charset, language, utils.unquote(quoted)
|
||||||
|
|
||||||
|
|
||||||
|
class Message(object):
|
||||||
|
"""Basic message object.
|
||||||
|
|
||||||
|
A message object is defined as something that has a bunch of RFC 2822
|
||||||
|
headers and a payload. It may optionally have an envelope header
|
||||||
|
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
|
||||||
|
multipart or a message/rfc822), then the payload is a list of Message
|
||||||
|
objects, otherwise it is a string.
|
||||||
|
|
||||||
|
Message objects implement part of the `mapping' interface, which assumes
|
||||||
|
there is exactly one occurrence of the header per message. Some headers
|
||||||
|
do in fact appear multiple times (e.g. Received) and for those headers,
|
||||||
|
you must use the explicit API to set or get all the headers. Not all of
|
||||||
|
the mapping methods are implemented.
|
||||||
|
"""
|
||||||
|
def __init__(self, policy=compat32):
|
||||||
|
self.policy = policy
|
||||||
|
self._headers = list()
|
||||||
|
self._unixfrom = None
|
||||||
|
self._payload = None
|
||||||
|
self._charset = None
|
||||||
|
# Defaults for multipart messages
|
||||||
|
self.preamble = self.epilogue = None
|
||||||
|
self.defects = []
|
||||||
|
# Default content type
|
||||||
|
self._default_type = 'text/plain'
|
||||||
|
|
||||||
|
@as_native_str(encoding='utf-8')
|
||||||
|
def __str__(self):
|
||||||
|
"""Return the entire formatted message as a string.
|
||||||
|
This includes the headers, body, and envelope header.
|
||||||
|
"""
|
||||||
|
return self.as_string()
|
||||||
|
|
||||||
|
def as_string(self, unixfrom=False, maxheaderlen=0):
|
||||||
|
"""Return the entire formatted message as a (unicode) string.
|
||||||
|
Optional `unixfrom' when True, means include the Unix From_ envelope
|
||||||
|
header.
|
||||||
|
|
||||||
|
This is a convenience method and may not generate the message exactly
|
||||||
|
as you intend. For more flexibility, use the flatten() method of a
|
||||||
|
Generator instance.
|
||||||
|
"""
|
||||||
|
from future.backports.email.generator import Generator
|
||||||
|
fp = StringIO()
|
||||||
|
g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen)
|
||||||
|
g.flatten(self, unixfrom=unixfrom)
|
||||||
|
return fp.getvalue()
|
||||||
|
|
||||||
|
def is_multipart(self):
|
||||||
|
"""Return True if the message consists of multiple parts."""
|
||||||
|
return isinstance(self._payload, list)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Unix From_ line
|
||||||
|
#
|
||||||
|
def set_unixfrom(self, unixfrom):
|
||||||
|
self._unixfrom = unixfrom
|
||||||
|
|
||||||
|
def get_unixfrom(self):
|
||||||
|
return self._unixfrom
|
||||||
|
|
||||||
|
#
|
||||||
|
# Payload manipulation.
|
||||||
|
#
|
||||||
|
def attach(self, payload):
|
||||||
|
"""Add the given payload to the current payload.
|
||||||
|
|
||||||
|
The current payload will always be a list of objects after this method
|
||||||
|
is called. If you want to set the payload to a scalar object, use
|
||||||
|
set_payload() instead.
|
||||||
|
"""
|
||||||
|
if self._payload is None:
|
||||||
|
self._payload = [payload]
|
||||||
|
else:
|
||||||
|
self._payload.append(payload)
|
||||||
|
|
||||||
|
def get_payload(self, i=None, decode=False):
    """Return a reference to the payload.

    The payload will either be a list object or a string.  If you mutate
    the list object, you modify the message's payload in place.  Optional
    i returns that index into the payload.

    Optional decode is a flag indicating whether the payload should be
    decoded or not, according to the Content-Transfer-Encoding header
    (default is False).

    When True and the message is not a multipart, the payload will be
    decoded if this header's value is `quoted-printable' or `base64'.  If
    some other encoding is used, or the header is missing, or if the
    payload has bogus data (i.e. bogus base64 or uuencoded data), the
    payload is returned as-is.

    If the message is a multipart and the decode flag is True, then None
    is returned.
    """
    # Here is the logic table for this code, based on the email5.0.0 code:
    #   i     decode  is_multipart  result
    # ------  ------  ------------  ------------------------------
    #  None   True    True          None
    #   i     True    True          None
    #  None   False   True          _payload (a list)
    #   i     False   True          _payload element i (a Message)
    #   i     False   False         error (not a list)
    #   i     True    False         error (not a list)
    #  None   False   False         _payload
    #  None   True    False         _payload decoded (bytes)
    # Note that Barry planned to factor out the 'decode' case, but that
    # isn't so easy now that we handle the 8 bit data, which needs to be
    # converted in both the decode and non-decode path.
    if self.is_multipart():
        if decode:
            return None
        if i is None:
            return self._payload
        else:
            return self._payload[i]
    # For backward compatibility, Use isinstance and this error message
    # instead of the more logical is_multipart test.
    if i is not None and not isinstance(self._payload, list):
        raise TypeError('Expected list, got %s' % type(self._payload))
    payload = self._payload
    # cte might be a Header, so for now stringify it.
    cte = str(self.get('content-transfer-encoding', '')).lower()
    # payload may be bytes here.
    # NOTE(review): bpayload is only bound inside the isinstance(payload,
    # str) branch below; the decode paths after it appear to assume the
    # payload is a str at this point — confirm against the parser's
    # invariants before relying on non-str payloads with decode=True.
    if isinstance(payload, str):
        payload = str(payload)    # for Python-Future, so surrogateescape works
        if utils._has_surrogates(payload):
            # Raw 8-bit data was smuggled through as surrogates; recover
            # the original bytes.
            bpayload = payload.encode('ascii', 'surrogateescape')
            if not decode:
                try:
                    payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
                except LookupError:
                    # Unknown charset name: fall back to ascii with
                    # replacement characters.
                    payload = bpayload.decode('ascii', 'replace')
        elif decode:
            try:
                bpayload = payload.encode('ascii')
            except UnicodeError:
                # This won't happen for RFC compliant messages (messages
                # containing only ASCII codepoints in the unicode input).
                # If it does happen, turn the string into bytes in a way
                # guaranteed not to fail.
                bpayload = payload.encode('raw-unicode-escape')
    if not decode:
        return payload
    if cte == 'quoted-printable':
        return utils._qdecode(bpayload)
    elif cte == 'base64':
        # XXX: this is a bit of a hack; decode_b should probably be factored
        # out somewhere, but I haven't figured out where yet.
        value, defects = decode_b(b''.join(bpayload.splitlines()))
        for defect in defects:
            self.policy.handle_defect(self, defect)
        return value
    elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
        in_file = BytesIO(bpayload)
        out_file = BytesIO()
        try:
            uu.decode(in_file, out_file, quiet=True)
            return out_file.getvalue()
        except uu.Error:
            # Some decoding problem
            return bpayload
    # Unknown/absent CTE: return the raw bytes for a str payload,
    # otherwise whatever the payload already was.
    if isinstance(payload, str):
        return bpayload
    return payload
|
||||||
|
|
||||||
|
def set_payload(self, payload, charset=None):
    """Replace the message's payload with *payload*.

    When *charset* is given it also becomes the message's default
    character set; see set_charset() for the details of that side effect.
    """
    self._payload = payload
    if charset is None:
        return
    self.set_charset(charset)
|
||||||
|
|
||||||
|
def set_charset(self, charset):
    """Set the charset of the payload to a given character set.

    charset can be a Charset instance, a string naming a character set, or
    None.  If it is a string it will be converted to a Charset instance.
    If charset is None, the charset parameter will be removed from the
    Content-Type field.  Anything else will generate a TypeError.

    The message will be assumed to be of type text/* encoded with
    charset.input_charset.  It will be converted to charset.output_charset
    and encoded properly, if needed, when generating the plain text
    representation of the message.  MIME headers (MIME-Version,
    Content-Type, Content-Transfer-Encoding) will be added as needed.
    """
    if charset is None:
        # Clearing: drop the parameter and forget the cached Charset.
        self.del_param('charset')
        self._charset = None
        return
    if not isinstance(charset, Charset):
        # Accept a plain string name; Charset() raises for bad input.
        charset = Charset(charset)
    self._charset = charset
    if 'MIME-Version' not in self:
        self.add_header('MIME-Version', '1.0')
    if 'Content-Type' not in self:
        self.add_header('Content-Type', 'text/plain',
                        charset=charset.get_output_charset())
    else:
        self.set_param('charset', charset.get_output_charset())
    # If the input and output charsets differ, the body must be
    # re-encoded now (Charset.__eq__ compares against strings).
    if charset != charset.get_output_charset():
        self._payload = charset.body_encode(self._payload)
    if 'Content-Transfer-Encoding' not in self:
        cte = charset.get_body_encoding()
        # cte is either a callable that sets the header itself (and
        # encodes the body), or a plain string naming the encoding.
        try:
            cte(self)
        except TypeError:
            self._payload = charset.body_encode(self._payload)
            self.add_header('Content-Transfer-Encoding', cte)
|
||||||
|
|
||||||
|
def get_charset(self):
    """Return the Charset instance associated with the message's payload."""
    return self._charset
|
||||||
|
|
||||||
|
#
|
||||||
|
# MAPPING INTERFACE (partial)
|
||||||
|
#
|
||||||
|
def __len__(self):
    """Return the total number of headers, including duplicates."""
    headers = self._headers
    return len(headers)
|
||||||
|
|
||||||
|
def __getitem__(self, name):
    """Get a header value.

    Missing headers yield None rather than raising KeyError.  With
    duplicate headers, which occurrence is returned is undefined; use
    get_all() to retrieve every matching value.
    """
    return self.get(name)
|
||||||
|
|
||||||
|
def __setitem__(self, name, val):
    """Append a header to the message.

    Note: this does NOT overwrite an existing header with the same field
    name; call __delitem__() first to remove prior occurrences.
    """
    max_count = self.policy.header_max_count(name)
    if max_count:
        # Enforce the policy's cap on how many times this field may occur.
        lname = name.lower()
        matches = sum(1 for field, _value in self._headers
                      if field.lower() == lname)
        if matches >= max_count:
            raise ValueError("There may be at most {} {} headers "
                             "in a message".format(max_count, name))
    self._headers.append(self.policy.header_store_parse(name, val))
|
||||||
|
|
||||||
|
def __delitem__(self, name):
    """Delete all occurrences of a header, if present.

    No exception is raised when the header is missing.
    """
    target = name.lower()
    self._headers = [(field, value) for field, value in self._headers
                     if field.lower() != target]
|
||||||
|
|
||||||
|
def __contains__(self, name):
    """Return True if a header with this (case-insensitive) name exists."""
    target = name.lower()
    return any(field.lower() == target for field, _value in self._headers)
|
||||||
|
|
||||||
|
def __iter__(self):
    """Yield each header field name, in order, duplicates included."""
    for name, _value in self._headers:
        yield name
|
||||||
|
|
||||||
|
def keys(self):
    """Return a list of all the message's header field names.

    Order matches the original message (or insertion order), and
    duplicates are kept.  Deleted-then-re-added fields land at the end.
    """
    return [field for field, _value in self._headers]
|
||||||
|
|
||||||
|
def values(self):
    """Return a list of all the message's header values.

    Order matches the original message (or insertion order), and
    duplicates are kept.  Deleted-then-re-added fields land at the end.
    """
    result = []
    for field, value in self._headers:
        result.append(self.policy.header_fetch_parse(field, value))
    return result
|
||||||
|
|
||||||
|
def items(self):
    """Get all the message's header fields and values as (name, value) pairs.

    Order matches the original message (or insertion order), and
    duplicates are kept.  Deleted-then-re-added fields land at the end.
    """
    pairs = []
    for field, value in self._headers:
        pairs.append((field, self.policy.header_fetch_parse(field, value)))
    return pairs
|
||||||
|
|
||||||
|
def get(self, name, failobj=None):
    """Get a header value by (case-insensitive) field name.

    Like __getitem__() but returns *failobj* instead of None when the
    field is missing.
    """
    target = name.lower()
    for field, value in self._headers:
        if field.lower() == target:
            return self.policy.header_fetch_parse(field, value)
    return failobj
|
||||||
|
|
||||||
|
#
|
||||||
|
# "Internal" methods (public API, but only intended for use by a parser
|
||||||
|
# or generator, not normal application code.
|
||||||
|
#
|
||||||
|
|
||||||
|
def set_raw(self, name, value):
    """Store name and value in the model without modification.

    This is an "internal" API, intended only for use by a parser.
    """
    self._headers.append((name, value))
|
||||||
|
|
||||||
|
def raw_items(self):
    """Return an iterator over the unmodified (name, value) header pairs.

    This is an "internal" API, intended only for use by a generator.
    Iterates a snapshot, so the live header list can be mutated safely.
    """
    snapshot = list(self._headers)
    return iter(snapshot)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Additional useful stuff
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_all(self, name, failobj=None):
    """Return every value stored under the named field.

    Results keep original message order and may contain duplicates;
    deleted-then-re-added fields land at the end.  If no such field
    exists, *failobj* (default None) is returned.
    """
    target = name.lower()
    matches = []
    for field, value in self._headers:
        if field.lower() == target:
            matches.append(self.policy.header_fetch_parse(field, value))
    return matches if matches else failobj
|
||||||
|
|
||||||
|
def add_header(self, _name, _value, **_params):
    """Extended header setting.

    _name is the header field to add.  Keyword arguments set additional
    parameters for the header field, with underscores converted to
    dashes.  Normally a parameter is added as key="value"; when the value
    is None only the key is added.  A value holding non-ASCII characters
    can be given as a (charset, language, value) three-tuple, encoded per
    RFC 2231; otherwise it is encoded with the utf-8 charset and a
    language of ''.

    Examples:

    msg.add_header('content-disposition', 'attachment', filename='bud.gif')
    msg.add_header('content-disposition', 'attachment',
                   filename=('utf-8', '', 'Fußballer.ppt'))
    msg.add_header('content-disposition', 'attachment',
                   filename='Fußballer.ppt')
    """
    parts = []
    for key, val in _params.items():
        dashed = key.replace('_', '-')
        parts.append(dashed if val is None else _formatparam(dashed, val))
    if _value is not None:
        parts.insert(0, _value)
    self[_name] = SEMISPACE.join(parts)
|
||||||
|
|
||||||
|
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining
    header order and case.  If no matching header was found, a KeyError
    is raised.
    """
    _name = _name.lower()
    # enumerate() instead of zip(range(len(...)), ...): same pairs, the
    # idiomatic (and clearer) way to walk with indices.
    for i, (k, v) in enumerate(self._headers):
        if k.lower() == _name:
            # Keep the stored field's original case; only re-parse the
            # new value through the policy.
            self._headers[i] = self.policy.header_store_parse(k, _value)
            break
    else:
        raise KeyError(_name)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Use these three methods instead of the three above.
|
||||||
|
#
|
||||||
|
|
||||||
|
def get_content_type(self):
    """Return the message's content type as lowercase `maintype/subtype'.

    When no Content-Type header is present, the default type from
    get_default_type() is returned instead — per RFC 2045 a message
    always has a default type, so this never returns nothing.

    RFC 2045 defines the default type as text/plain, except inside a
    multipart/digest container where it is message/rfc822.
    """
    sentinel = object()
    value = self.get('content-type', sentinel)
    if value is sentinel:
        # No header at all — the default has no parameters to strip.
        return self.get_default_type()
    ctype = _splitparam(value)[0].lower()
    # RFC 2045, section 5.2: a malformed value falls back to text/plain.
    if ctype.count('/') != 1:
        return 'text/plain'
    return ctype
|
||||||
|
|
||||||
|
def get_content_maintype(self):
    """Return the `maintype' half of the string from get_content_type()."""
    return self.get_content_type().split('/')[0]
|
||||||
|
|
||||||
|
def get_content_subtype(self):
    """Return the `subtype' half of the string from get_content_type()."""
    return self.get_content_type().split('/')[1]
|
||||||
|
|
||||||
|
def get_default_type(self):
    """Return the `default' content type.

    Most messages default to text/plain; subparts of multipart/digest
    containers default to message/rfc822.
    """
    return self._default_type
|
||||||
|
|
||||||
|
def set_default_type(self, ctype):
    """Set the `default' content type.

    *ctype* should be "text/plain" or "message/rfc822", though this is
    not enforced.  The default type is never written to the Content-Type
    header.
    """
    self._default_type = ctype
|
||||||
|
|
||||||
|
def _get_params_preserve(self, failobj, header):
    # Like get_params() but preserves the quoting of values.  BAW:
    # should this be part of the public interface?
    sentinel = object()
    value = self.get(header, sentinel)
    if value is sentinel:
        return failobj
    params = []
    for piece in _parseparam(value):
        try:
            name, val = piece.split('=', 1)
        except ValueError:
            # Must have been a bare attribute with no '=' at all.
            params.append((piece.strip(), ''))
        else:
            params.append((name.strip(), val.strip()))
    # Fold RFC 2231 continuations/encodings back into single parameters.
    return utils.decode_params(params)
|
||||||
|
|
||||||
|
def get_params(self, failobj=None, header='content-type', unquote=True):
    """Return the message's Content-Type parameters, as a list.

    Each element is a (key, value) 2-tuple split on the `=' sign; a
    parameter with no `=' gets an empty-string value.  Values follow the
    same conventions as get_param().

    *failobj* is returned when the header is absent.  *header* selects a
    header other than Content-Type.  When *unquote* is true, values are
    unquoted.
    """
    sentinel = object()
    params = self._get_params_preserve(sentinel, header)
    if params is sentinel:
        return failobj
    if not unquote:
        return params
    return [(name, _unquotevalue(val)) for name, val in params]
|
||||||
|
|
||||||
|
def get_param(self, param, failobj=None, header='content-type',
              unquote=True):
    """Return the parameter value if found in the Content-Type header.

    *failobj* is returned when the header is missing or carries no such
    parameter.  *header* selects a header other than Content-Type.

    Keys compare case-insensitively.  The result is either a string or,
    for an RFC 2231-encoded parameter, a (CHARSET, LANGUAGE, VALUE)
    3-tuple; CHARSET and LANGUAGE may be None, in which case VALUE should
    be treated as us-ascii.  LANGUAGE can usually be ignored.  The value
    (string or the VALUE item) is unquoted unless *unquote* is False.

    Callers that don't care about RFC 2231 can collapse the result:

        rawparam = msg.get_param('foo')
        param = email.utils.collapse_rfc2231_value(rawparam)
    """
    if header not in self:
        return failobj
    want = param.lower()
    for name, val in self._get_params_preserve(failobj, header):
        if name.lower() == want:
            return _unquotevalue(val) if unquote else val
    return failobj
|
||||||
|
|
||||||
|
def set_param(self, param, value, header='Content-Type', requote=True,
              charset=None, language=''):
    """Set a parameter in the Content-Type header.

    If the parameter already exists in the header, its value will be
    replaced with the new value.

    If header is Content-Type and has not yet been defined for this
    message, it will be set to "text/plain" and the new parameter and
    value will be appended as per RFC 2045.

    An alternate header can specified in the header argument, and all
    parameters will be quoted as necessary unless requote is False.

    If charset is specified, the parameter will be encoded according to RFC
    2231.  Optional language specifies the RFC 2231 language, defaulting
    to the empty string.  Both charset and language should be strings.
    """
    # A charset promotes the value to an RFC 2231 triple, unless the
    # caller already supplied one.
    if not isinstance(value, tuple) and charset:
        value = (charset, language, value)

    if header not in self and header.lower() == 'content-type':
        # Implicit default per RFC 2045 when Content-Type is absent.
        ctype = 'text/plain'
    else:
        ctype = self.get(header)
    if not self.get_param(param, header=header):
        # New parameter: append it to the existing header value.
        if not ctype:
            ctype = _formatparam(param, value, requote)
        else:
            ctype = SEMISPACE.join(
                [ctype, _formatparam(param, value, requote)])
    else:
        # Existing parameter: rebuild the header value in order,
        # substituting the new value where the old parameter appeared.
        ctype = ''
        for old_param, old_value in self.get_params(header=header,
                                                    unquote=requote):
            append_param = ''
            if old_param.lower() == param.lower():
                append_param = _formatparam(param, value, requote)
            else:
                append_param = _formatparam(old_param, old_value, requote)
            if not ctype:
                ctype = append_param
            else:
                ctype = SEMISPACE.join([ctype, append_param])
    # Only touch the header list when the value actually changed.
    if ctype != self.get(header):
        del self[header]
        self[header] = ctype
|
||||||
|
|
||||||
|
def del_param(self, param, header='content-type', requote=True):
    """Remove the given parameter completely from the Content-Type header.

    The header is rewritten in place without the parameter or its value,
    re-quoting each surviving parameter unless *requote* is False.
    *header* selects an alternative to Content-Type.
    """
    if header not in self:
        return
    target = param.lower()
    kept = [_formatparam(name, val, requote)
            for name, val in self.get_params(header=header, unquote=requote)
            if name.lower() != target]
    new_ctype = SEMISPACE.join(kept)
    if new_ctype != self.get(header):
        del self[header]
        self[header] = new_ctype
|
||||||
|
|
||||||
|
def set_type(self, type, header='Content-Type', requote=True):
    """Set the main type and subtype for the Content-Type header.

    *type* must look like "maintype/subtype" or ValueError is raised.

    The Content-Type header is replaced while keeping all its existing
    parameters.  If *requote* is False, existing quoting is left alone;
    otherwise parameters are re-quoted (the default).

    *header* may name an alternative header.  Whenever Content-Type is
    set, a MIME-Version header is added as well.
    """
    # BAW: should we be strict?
    if type.count('/') != 1:
        raise ValueError
    # Set the Content-Type, you get a MIME-Version
    if header.lower() == 'content-type':
        del self['mime-version']
        self['MIME-Version'] = '1.0'
    if header not in self:
        self[header] = type
        return
    params = self.get_params(header=header, unquote=requote)
    del self[header]
    self[header] = type
    # Skip the first param; it's the old type.
    for name, val in params[1:]:
        self.set_param(name, val, header, requote)
|
||||||
|
|
||||||
|
def get_filename(self, failobj=None):
    """Return the filename associated with the payload if present.

    Taken from the Content-Disposition header's `filename' parameter
    (unquoted); if that is absent, the Content-Type header's `name'
    parameter is tried before giving up and returning *failobj*.
    """
    sentinel = object()
    filename = self.get_param('filename', sentinel, 'content-disposition')
    if filename is sentinel:
        filename = self.get_param('name', sentinel, 'content-type')
    if filename is sentinel:
        return failobj
    return utils.collapse_rfc2231_value(filename).strip()
|
||||||
|
|
||||||
|
def get_boundary(self, failobj=None):
    """Return the boundary associated with the payload if present.

    Extracted (and unquoted) from the Content-Type header's `boundary'
    parameter; *failobj* is returned when there is none.
    """
    sentinel = object()
    boundary = self.get_param('boundary', sentinel)
    if boundary is sentinel:
        return failobj
    # RFC 2046 says that boundaries may begin but not end in w/s
    return utils.collapse_rfc2231_value(boundary).rstrip()
|
||||||
|
|
||||||
|
def set_boundary(self, boundary):
    """Set the boundary parameter in Content-Type to 'boundary'.

    This is subtly different than deleting the Content-Type header and
    adding a new one with a new boundary parameter via add_header().  The
    main difference is that using the set_boundary() method preserves the
    order of the Content-Type header in the original message.

    HeaderParseError is raised if the message has no Content-Type header.
    """
    missing = object()
    params = self._get_params_preserve(missing, 'content-type')
    if params is missing:
        # There was no Content-Type header, and we don't know what type
        # to set it to, so raise an exception.
        raise errors.HeaderParseError('No Content-Type header found')
    newparams = list()
    foundp = False
    # Rebuild the parameter list, swapping in the new (quoted) boundary
    # where the old one stood so parameter order is preserved.
    for pk, pv in params:
        if pk.lower() == 'boundary':
            newparams.append(('boundary', '"%s"' % boundary))
            foundp = True
        else:
            newparams.append((pk, pv))
    if not foundp:
        # The original Content-Type header had no boundary attribute.
        # Tack one on the end.  BAW: should we raise an exception
        # instead???
        newparams.append(('boundary', '"%s"' % boundary))
    # Replace the existing Content-Type header with the new value,
    # keeping its position among the other headers.
    newheaders = list()
    for h, v in self._headers:
        if h.lower() == 'content-type':
            parts = list()
            for k, v in newparams:
                if v == '':
                    # Bare attribute with no value: emit just the key.
                    parts.append(k)
                else:
                    parts.append('%s=%s' % (k, v))
            val = SEMISPACE.join(parts)
            newheaders.append(self.policy.header_store_parse(h, val))

        else:
            newheaders.append((h, v))
    self._headers = newheaders
|
||||||
|
|
||||||
|
def get_content_charset(self, failobj=None):
    """Return the charset parameter of the Content-Type header.

    The returned string is always coerced to lower case.  If there is no
    Content-Type header, or if that header has no charset parameter,
    failobj is returned.
    """
    missing = object()
    charset = self.get_param('charset', missing)
    if charset is missing:
        return failobj
    if isinstance(charset, tuple):
        # RFC 2231 encoded, so decode it, and it better end up as ascii.
        pcharset = charset[0] or 'us-ascii'
        try:
            # LookupError will be raised if the charset isn't known to
            # Python.  UnicodeError will be raised if the encoded text
            # contains a character not in the charset.
            as_bytes = charset[2].encode('raw-unicode-escape')
            charset = str(as_bytes, pcharset)
        except (LookupError, UnicodeError):
            # Fall back to the raw VALUE item of the 2231 triple.
            charset = charset[2]
    # charset characters must be in us-ascii range
    try:
        charset.encode('us-ascii')
    except UnicodeError:
        return failobj
    # RFC 2046, $4.1.2 says charsets are not case sensitive
    return charset.lower()
|
||||||
|
|
||||||
|
def get_charsets(self, failobj=None):
    """Return a list containing the charset(s) used in this message.

    One entry per part visited by walk() — the charset parameter of the
    part's Content-Type header, or *failobj* (default None) when the part
    has no charset (e.g. its main type is not "text").  The container
    message itself is included, so a non-multipart message still yields a
    list of length 1.
    """
    charsets = []
    for part in self.walk():
        charsets.append(part.get_content_charset(failobj))
    return charsets
|
||||||
|
|
||||||
|
# I.e. def walk(self): ...
|
||||||
|
from future.backports.email.iterators import walk
|
@ -0,0 +1,39 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Keith Dart
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Class representing application/* type MIME documents."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
from future.backports.email import encoders
|
||||||
|
from future.backports.email.mime.nonmultipart import MIMENonMultipart
|
||||||
|
|
||||||
|
__all__ = ["MIMEApplication"]
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEApplication(MIMENonMultipart):
    """Class for generating application/* MIME documents."""

    def __init__(self, _data, _subtype='octet-stream',
                 _encoder=encoders.encode_base64, **_params):
        """Create an application/* type MIME document.

        _data is a string containing the raw application data.

        _subtype is the MIME content type subtype, defaulting to
        'octet-stream'.

        _encoder is a function which will perform the actual encoding for
        transport of the application data, defaulting to base64 encoding.

        Any additional keyword arguments are passed to the base class
        constructor, which turns them into parameters on the Content-Type
        header.
        """
        # Explicitly passing None is rejected; the default already covers
        # "no subtype given".
        if _subtype is None:
            raise TypeError('Invalid application MIME subtype')
        MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
        self.set_payload(_data)
        # Encode in place: the encoder mutates the message (payload and
        # Content-Transfer-Encoding header).
        _encoder(self)
|
74
venv/Lib/site-packages/future/backports/email/mime/audio.py
Normal file
74
venv/Lib/site-packages/future/backports/email/mime/audio.py
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
# Copyright (C) 2001-2007 Python Software Foundation
|
||||||
|
# Author: Anthony Baxter
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Class representing audio/* type MIME documents."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMEAudio']
|
||||||
|
|
||||||
|
import sndhdr
|
||||||
|
|
||||||
|
from io import BytesIO
|
||||||
|
from future.backports.email import encoders
|
||||||
|
from future.backports.email.mime.nonmultipart import MIMENonMultipart
|
||||||
|
|
||||||
|
|
||||||
|
_sndhdr_MIMEmap = {'au' : 'basic',
|
||||||
|
'wav' :'x-wav',
|
||||||
|
'aiff':'x-aiff',
|
||||||
|
'aifc':'x-aiff',
|
||||||
|
}
|
||||||
|
|
||||||
|
# There are others in sndhdr that don't have MIME types. :(
|
||||||
|
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
|
||||||
|
def _whatsnd(data):
|
||||||
|
"""Try to identify a sound file type.
|
||||||
|
|
||||||
|
sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
|
||||||
|
we re-do it here. It would be easier to reverse engineer the Unix 'file'
|
||||||
|
command and use the standard 'magic' file, as shipped with a modern Unix.
|
||||||
|
"""
|
||||||
|
hdr = data[:512]
|
||||||
|
fakefile = BytesIO(hdr)
|
||||||
|
for testfn in sndhdr.tests:
|
||||||
|
res = testfn(hdr, fakefile)
|
||||||
|
if res is not None:
|
||||||
|
return _sndhdr_MIMEmap.get(res[0])
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEAudio(MIMENonMultipart):
|
||||||
|
"""Class for generating audio/* MIME documents."""
|
||||||
|
|
||||||
|
def __init__(self, _audiodata, _subtype=None,
|
||||||
|
_encoder=encoders.encode_base64, **_params):
|
||||||
|
"""Create an audio/* type MIME document.
|
||||||
|
|
||||||
|
_audiodata is a string containing the raw audio data. If this data
|
||||||
|
can be decoded by the standard Python `sndhdr' module, then the
|
||||||
|
subtype will be automatically included in the Content-Type header.
|
||||||
|
Otherwise, you can specify the specific audio subtype via the
|
||||||
|
_subtype parameter. If _subtype is not given, and no subtype can be
|
||||||
|
guessed, a TypeError is raised.
|
||||||
|
|
||||||
|
_encoder is a function which will perform the actual encoding for
|
||||||
|
transport of the image data. It takes one argument, which is this
|
||||||
|
Image instance. It should use get_payload() and set_payload() to
|
||||||
|
change the payload to the encoded form. It should also add any
|
||||||
|
Content-Transfer-Encoding or other headers to the message as
|
||||||
|
necessary. The default encoding is Base64.
|
||||||
|
|
||||||
|
Any additional keyword arguments are passed to the base class
|
||||||
|
constructor, which turns them into parameters on the Content-Type
|
||||||
|
header.
|
||||||
|
"""
|
||||||
|
if _subtype is None:
|
||||||
|
_subtype = _whatsnd(_audiodata)
|
||||||
|
if _subtype is None:
|
||||||
|
raise TypeError('Could not find audio MIME subtype')
|
||||||
|
MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
|
||||||
|
self.set_payload(_audiodata)
|
||||||
|
_encoder(self)
|
25
venv/Lib/site-packages/future/backports/email/mime/base.py
Normal file
25
venv/Lib/site-packages/future/backports/email/mime/base.py
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Base class for MIME specializations."""
|
||||||
|
from __future__ import absolute_import, division, unicode_literals
|
||||||
|
from future.backports.email import message
|
||||||
|
|
||||||
|
__all__ = ['MIMEBase']
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEBase(message.Message):
|
||||||
|
"""Base class for MIME specializations."""
|
||||||
|
|
||||||
|
def __init__(self, _maintype, _subtype, **_params):
|
||||||
|
"""This constructor adds a Content-Type: and a MIME-Version: header.
|
||||||
|
|
||||||
|
The Content-Type: header is taken from the _maintype and _subtype
|
||||||
|
arguments. Additional parameters for this header are taken from the
|
||||||
|
keyword arguments.
|
||||||
|
"""
|
||||||
|
message.Message.__init__(self)
|
||||||
|
ctype = '%s/%s' % (_maintype, _subtype)
|
||||||
|
self.add_header('Content-Type', ctype, **_params)
|
||||||
|
self['MIME-Version'] = '1.0'
|
48
venv/Lib/site-packages/future/backports/email/mime/image.py
Normal file
48
venv/Lib/site-packages/future/backports/email/mime/image.py
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Class representing image/* type MIME documents."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMEImage']
|
||||||
|
|
||||||
|
import imghdr
|
||||||
|
|
||||||
|
from future.backports.email import encoders
|
||||||
|
from future.backports.email.mime.nonmultipart import MIMENonMultipart
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEImage(MIMENonMultipart):
|
||||||
|
"""Class for generating image/* type MIME documents."""
|
||||||
|
|
||||||
|
def __init__(self, _imagedata, _subtype=None,
|
||||||
|
_encoder=encoders.encode_base64, **_params):
|
||||||
|
"""Create an image/* type MIME document.
|
||||||
|
|
||||||
|
_imagedata is a string containing the raw image data. If this data
|
||||||
|
can be decoded by the standard Python `imghdr' module, then the
|
||||||
|
subtype will be automatically included in the Content-Type header.
|
||||||
|
Otherwise, you can specify the specific image subtype via the _subtype
|
||||||
|
parameter.
|
||||||
|
|
||||||
|
_encoder is a function which will perform the actual encoding for
|
||||||
|
transport of the image data. It takes one argument, which is this
|
||||||
|
Image instance. It should use get_payload() and set_payload() to
|
||||||
|
change the payload to the encoded form. It should also add any
|
||||||
|
Content-Transfer-Encoding or other headers to the message as
|
||||||
|
necessary. The default encoding is Base64.
|
||||||
|
|
||||||
|
Any additional keyword arguments are passed to the base class
|
||||||
|
constructor, which turns them into parameters on the Content-Type
|
||||||
|
header.
|
||||||
|
"""
|
||||||
|
if _subtype is None:
|
||||||
|
_subtype = imghdr.what(None, _imagedata)
|
||||||
|
if _subtype is None:
|
||||||
|
raise TypeError('Could not guess image MIME subtype')
|
||||||
|
MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
|
||||||
|
self.set_payload(_imagedata)
|
||||||
|
_encoder(self)
|
@ -0,0 +1,36 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Class representing message/* MIME documents."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMEMessage']
|
||||||
|
|
||||||
|
from future.backports.email import message
|
||||||
|
from future.backports.email.mime.nonmultipart import MIMENonMultipart
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEMessage(MIMENonMultipart):
|
||||||
|
"""Class representing message/* MIME documents."""
|
||||||
|
|
||||||
|
def __init__(self, _msg, _subtype='rfc822'):
|
||||||
|
"""Create a message/* type MIME document.
|
||||||
|
|
||||||
|
_msg is a message object and must be an instance of Message, or a
|
||||||
|
derived class of Message, otherwise a TypeError is raised.
|
||||||
|
|
||||||
|
Optional _subtype defines the subtype of the contained message. The
|
||||||
|
default is "rfc822" (this is defined by the MIME standard, even though
|
||||||
|
the term "rfc822" is technically outdated by RFC 2822).
|
||||||
|
"""
|
||||||
|
MIMENonMultipart.__init__(self, 'message', _subtype)
|
||||||
|
if not isinstance(_msg, message.Message):
|
||||||
|
raise TypeError('Argument is not an instance of Message')
|
||||||
|
# It's convenient to use this base class method. We need to do it
|
||||||
|
# this way or we'll get an exception
|
||||||
|
message.Message.attach(self, _msg)
|
||||||
|
# And be sure our default type is set correctly
|
||||||
|
self.set_default_type('message/rfc822')
|
@ -0,0 +1,49 @@
|
|||||||
|
# Copyright (C) 2002-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Base class for MIME multipart/* type messages."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMEMultipart']
|
||||||
|
|
||||||
|
from future.backports.email.mime.base import MIMEBase
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEMultipart(MIMEBase):
|
||||||
|
"""Base class for MIME multipart/* type messages."""
|
||||||
|
|
||||||
|
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
|
||||||
|
**_params):
|
||||||
|
"""Creates a multipart/* type message.
|
||||||
|
|
||||||
|
By default, creates a multipart/mixed message, with proper
|
||||||
|
Content-Type and MIME-Version headers.
|
||||||
|
|
||||||
|
_subtype is the subtype of the multipart content type, defaulting to
|
||||||
|
`mixed'.
|
||||||
|
|
||||||
|
boundary is the multipart boundary string. By default it is
|
||||||
|
calculated as needed.
|
||||||
|
|
||||||
|
_subparts is a sequence of initial subparts for the payload. It
|
||||||
|
must be an iterable object, such as a list. You can always
|
||||||
|
attach new subparts to the message by using the attach() method.
|
||||||
|
|
||||||
|
Additional parameters for the Content-Type header are taken from the
|
||||||
|
keyword arguments (or passed into the _params argument).
|
||||||
|
"""
|
||||||
|
MIMEBase.__init__(self, 'multipart', _subtype, **_params)
|
||||||
|
|
||||||
|
# Initialise _payload to an empty list as the Message superclass's
|
||||||
|
# implementation of is_multipart assumes that _payload is a list for
|
||||||
|
# multipart messages.
|
||||||
|
self._payload = []
|
||||||
|
|
||||||
|
if _subparts:
|
||||||
|
for p in _subparts:
|
||||||
|
self.attach(p)
|
||||||
|
if boundary:
|
||||||
|
self.set_boundary(boundary)
|
@ -0,0 +1,24 @@
|
|||||||
|
# Copyright (C) 2002-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Base class for MIME type messages that are not multipart."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMENonMultipart']
|
||||||
|
|
||||||
|
from future.backports.email import errors
|
||||||
|
from future.backports.email.mime.base import MIMEBase
|
||||||
|
|
||||||
|
|
||||||
|
class MIMENonMultipart(MIMEBase):
|
||||||
|
"""Base class for MIME multipart/* type messages."""
|
||||||
|
|
||||||
|
def attach(self, payload):
|
||||||
|
# The public API prohibits attaching multiple subparts to MIMEBase
|
||||||
|
# derived subtypes since none of them are, by definition, of content
|
||||||
|
# type multipart/*
|
||||||
|
raise errors.MultipartConversionError(
|
||||||
|
'Cannot attach additional subparts to non-multipart/*')
|
44
venv/Lib/site-packages/future/backports/email/mime/text.py
Normal file
44
venv/Lib/site-packages/future/backports/email/mime/text.py
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Class representing text/* type MIME documents."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['MIMEText']
|
||||||
|
|
||||||
|
from future.backports.email.encoders import encode_7or8bit
|
||||||
|
from future.backports.email.mime.nonmultipart import MIMENonMultipart
|
||||||
|
|
||||||
|
|
||||||
|
class MIMEText(MIMENonMultipart):
|
||||||
|
"""Class for generating text/* type MIME documents."""
|
||||||
|
|
||||||
|
def __init__(self, _text, _subtype='plain', _charset=None):
|
||||||
|
"""Create a text/* type MIME document.
|
||||||
|
|
||||||
|
_text is the string for this message object.
|
||||||
|
|
||||||
|
_subtype is the MIME sub content type, defaulting to "plain".
|
||||||
|
|
||||||
|
_charset is the character set parameter added to the Content-Type
|
||||||
|
header. This defaults to "us-ascii". Note that as a side-effect, the
|
||||||
|
Content-Transfer-Encoding header will also be set.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# If no _charset was specified, check to see if there are non-ascii
|
||||||
|
# characters present. If not, use 'us-ascii', otherwise use utf-8.
|
||||||
|
# XXX: This can be removed once #7304 is fixed.
|
||||||
|
if _charset is None:
|
||||||
|
try:
|
||||||
|
_text.encode('us-ascii')
|
||||||
|
_charset = 'us-ascii'
|
||||||
|
except UnicodeEncodeError:
|
||||||
|
_charset = 'utf-8'
|
||||||
|
|
||||||
|
MIMENonMultipart.__init__(self, 'text', _subtype,
|
||||||
|
**{'charset': _charset})
|
||||||
|
|
||||||
|
self.set_payload(_text, _charset)
|
135
venv/Lib/site-packages/future/backports/email/parser.py
Normal file
135
venv/Lib/site-packages/future/backports/email/parser.py
Normal file
@ -0,0 +1,135 @@
|
|||||||
|
# Copyright (C) 2001-2007 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""A parser of RFC 2822 and MIME email messages."""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
from io import StringIO, TextIOWrapper
|
||||||
|
|
||||||
|
from future.backports.email.feedparser import FeedParser, BytesFeedParser
|
||||||
|
from future.backports.email.message import Message
|
||||||
|
from future.backports.email._policybase import compat32
|
||||||
|
|
||||||
|
|
||||||
|
class Parser(object):
|
||||||
|
def __init__(self, _class=Message, **_3to2kwargs):
|
||||||
|
"""Parser of RFC 2822 and MIME email messages.
|
||||||
|
|
||||||
|
Creates an in-memory object tree representing the email message, which
|
||||||
|
can then be manipulated and turned over to a Generator to return the
|
||||||
|
textual representation of the message.
|
||||||
|
|
||||||
|
The string must be formatted as a block of RFC 2822 headers and header
|
||||||
|
continuation lines, optionally preceeded by a `Unix-from' header. The
|
||||||
|
header block is terminated either by the end of the string or by a
|
||||||
|
blank line.
|
||||||
|
|
||||||
|
_class is the class to instantiate for new message objects when they
|
||||||
|
must be created. This class must have a constructor that can take
|
||||||
|
zero arguments. Default is Message.Message.
|
||||||
|
|
||||||
|
The policy keyword specifies a policy object that controls a number of
|
||||||
|
aspects of the parser's operation. The default policy maintains
|
||||||
|
backward compatibility.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
|
||||||
|
else: policy = compat32
|
||||||
|
self._class = _class
|
||||||
|
self.policy = policy
|
||||||
|
|
||||||
|
def parse(self, fp, headersonly=False):
|
||||||
|
"""Create a message structure from the data in a file.
|
||||||
|
|
||||||
|
Reads all the data from the file and returns the root of the message
|
||||||
|
structure. Optional headersonly is a flag specifying whether to stop
|
||||||
|
parsing after reading the headers or not. The default is False,
|
||||||
|
meaning it parses the entire contents of the file.
|
||||||
|
"""
|
||||||
|
feedparser = FeedParser(self._class, policy=self.policy)
|
||||||
|
if headersonly:
|
||||||
|
feedparser._set_headersonly()
|
||||||
|
while True:
|
||||||
|
data = fp.read(8192)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
feedparser.feed(data)
|
||||||
|
return feedparser.close()
|
||||||
|
|
||||||
|
def parsestr(self, text, headersonly=False):
|
||||||
|
"""Create a message structure from a string.
|
||||||
|
|
||||||
|
Returns the root of the message structure. Optional headersonly is a
|
||||||
|
flag specifying whether to stop parsing after reading the headers or
|
||||||
|
not. The default is False, meaning it parses the entire contents of
|
||||||
|
the file.
|
||||||
|
"""
|
||||||
|
return self.parse(StringIO(text), headersonly=headersonly)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class HeaderParser(Parser):
|
||||||
|
def parse(self, fp, headersonly=True):
|
||||||
|
return Parser.parse(self, fp, True)
|
||||||
|
|
||||||
|
def parsestr(self, text, headersonly=True):
|
||||||
|
return Parser.parsestr(self, text, True)
|
||||||
|
|
||||||
|
|
||||||
|
class BytesParser(object):
|
||||||
|
|
||||||
|
def __init__(self, *args, **kw):
|
||||||
|
"""Parser of binary RFC 2822 and MIME email messages.
|
||||||
|
|
||||||
|
Creates an in-memory object tree representing the email message, which
|
||||||
|
can then be manipulated and turned over to a Generator to return the
|
||||||
|
textual representation of the message.
|
||||||
|
|
||||||
|
The input must be formatted as a block of RFC 2822 headers and header
|
||||||
|
continuation lines, optionally preceeded by a `Unix-from' header. The
|
||||||
|
header block is terminated either by the end of the input or by a
|
||||||
|
blank line.
|
||||||
|
|
||||||
|
_class is the class to instantiate for new message objects when they
|
||||||
|
must be created. This class must have a constructor that can take
|
||||||
|
zero arguments. Default is Message.Message.
|
||||||
|
"""
|
||||||
|
self.parser = Parser(*args, **kw)
|
||||||
|
|
||||||
|
def parse(self, fp, headersonly=False):
|
||||||
|
"""Create a message structure from the data in a binary file.
|
||||||
|
|
||||||
|
Reads all the data from the file and returns the root of the message
|
||||||
|
structure. Optional headersonly is a flag specifying whether to stop
|
||||||
|
parsing after reading the headers or not. The default is False,
|
||||||
|
meaning it parses the entire contents of the file.
|
||||||
|
"""
|
||||||
|
fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
|
||||||
|
with fp:
|
||||||
|
return self.parser.parse(fp, headersonly)
|
||||||
|
|
||||||
|
|
||||||
|
def parsebytes(self, text, headersonly=False):
|
||||||
|
"""Create a message structure from a byte string.
|
||||||
|
|
||||||
|
Returns the root of the message structure. Optional headersonly is a
|
||||||
|
flag specifying whether to stop parsing after reading the headers or
|
||||||
|
not. The default is False, meaning it parses the entire contents of
|
||||||
|
the file.
|
||||||
|
"""
|
||||||
|
text = text.decode('ASCII', errors='surrogateescape')
|
||||||
|
return self.parser.parsestr(text, headersonly)
|
||||||
|
|
||||||
|
|
||||||
|
class BytesHeaderParser(BytesParser):
|
||||||
|
def parse(self, fp, headersonly=True):
|
||||||
|
return BytesParser.parse(self, fp, headersonly=True)
|
||||||
|
|
||||||
|
def parsebytes(self, text, headersonly=True):
|
||||||
|
return BytesParser.parsebytes(self, text, headersonly=True)
|
193
venv/Lib/site-packages/future/backports/email/policy.py
Normal file
193
venv/Lib/site-packages/future/backports/email/policy.py
Normal file
@ -0,0 +1,193 @@
|
|||||||
|
"""This will be the home for the policy that hooks in the new
|
||||||
|
code that adds all the email6 features.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import super
|
||||||
|
|
||||||
|
from future.standard_library.email._policybase import (Policy, Compat32,
|
||||||
|
compat32, _extend_docstrings)
|
||||||
|
from future.standard_library.email.utils import _has_surrogates
|
||||||
|
from future.standard_library.email.headerregistry import HeaderRegistry as HeaderRegistry
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Compat32',
|
||||||
|
'compat32',
|
||||||
|
'Policy',
|
||||||
|
'EmailPolicy',
|
||||||
|
'default',
|
||||||
|
'strict',
|
||||||
|
'SMTP',
|
||||||
|
'HTTP',
|
||||||
|
]
|
||||||
|
|
||||||
|
@_extend_docstrings
|
||||||
|
class EmailPolicy(Policy):
|
||||||
|
|
||||||
|
"""+
|
||||||
|
PROVISIONAL
|
||||||
|
|
||||||
|
The API extensions enabled by this policy are currently provisional.
|
||||||
|
Refer to the documentation for details.
|
||||||
|
|
||||||
|
This policy adds new header parsing and folding algorithms. Instead of
|
||||||
|
simple strings, headers are custom objects with custom attributes
|
||||||
|
depending on the type of the field. The folding algorithm fully
|
||||||
|
implements RFCs 2047 and 5322.
|
||||||
|
|
||||||
|
In addition to the settable attributes listed above that apply to
|
||||||
|
all Policies, this policy adds the following additional attributes:
|
||||||
|
|
||||||
|
refold_source -- if the value for a header in the Message object
|
||||||
|
came from the parsing of some source, this attribute
|
||||||
|
indicates whether or not a generator should refold
|
||||||
|
that value when transforming the message back into
|
||||||
|
stream form. The possible values are:
|
||||||
|
|
||||||
|
none -- all source values use original folding
|
||||||
|
long -- source values that have any line that is
|
||||||
|
longer than max_line_length will be
|
||||||
|
refolded
|
||||||
|
all -- all values are refolded.
|
||||||
|
|
||||||
|
The default is 'long'.
|
||||||
|
|
||||||
|
header_factory -- a callable that takes two arguments, 'name' and
|
||||||
|
'value', where 'name' is a header field name and
|
||||||
|
'value' is an unfolded header field value, and
|
||||||
|
returns a string-like object that represents that
|
||||||
|
header. A default header_factory is provided that
|
||||||
|
understands some of the RFC5322 header field types.
|
||||||
|
(Currently address fields and date fields have
|
||||||
|
special treatment, while all other fields are
|
||||||
|
treated as unstructured. This list will be
|
||||||
|
completed before the extension is marked stable.)
|
||||||
|
"""
|
||||||
|
|
||||||
|
refold_source = 'long'
|
||||||
|
header_factory = HeaderRegistry()
|
||||||
|
|
||||||
|
def __init__(self, **kw):
|
||||||
|
# Ensure that each new instance gets a unique header factory
|
||||||
|
# (as opposed to clones, which share the factory).
|
||||||
|
if 'header_factory' not in kw:
|
||||||
|
object.__setattr__(self, 'header_factory', HeaderRegistry())
|
||||||
|
super().__init__(**kw)
|
||||||
|
|
||||||
|
def header_max_count(self, name):
|
||||||
|
"""+
|
||||||
|
The implementation for this class returns the max_count attribute from
|
||||||
|
the specialized header class that would be used to construct a header
|
||||||
|
of type 'name'.
|
||||||
|
"""
|
||||||
|
return self.header_factory[name].max_count
|
||||||
|
|
||||||
|
# The logic of the next three methods is chosen such that it is possible to
|
||||||
|
# switch a Message object between a Compat32 policy and a policy derived
|
||||||
|
# from this class and have the results stay consistent. This allows a
|
||||||
|
# Message object constructed with this policy to be passed to a library
|
||||||
|
# that only handles Compat32 objects, or to receive such an object and
|
||||||
|
# convert it to use the newer style by just changing its policy. It is
|
||||||
|
# also chosen because it postpones the relatively expensive full rfc5322
|
||||||
|
# parse until as late as possible when parsing from source, since in many
|
||||||
|
# applications only a few headers will actually be inspected.
|
||||||
|
|
||||||
|
def header_source_parse(self, sourcelines):
|
||||||
|
"""+
|
||||||
|
The name is parsed as everything up to the ':' and returned unmodified.
|
||||||
|
The value is determined by stripping leading whitespace off the
|
||||||
|
remainder of the first line, joining all subsequent lines together, and
|
||||||
|
stripping any trailing carriage return or linefeed characters. (This
|
||||||
|
is the same as Compat32).
|
||||||
|
|
||||||
|
"""
|
||||||
|
name, value = sourcelines[0].split(':', 1)
|
||||||
|
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
|
||||||
|
return (name, value.rstrip('\r\n'))
|
||||||
|
|
||||||
|
def header_store_parse(self, name, value):
|
||||||
|
"""+
|
||||||
|
The name is returned unchanged. If the input value has a 'name'
|
||||||
|
attribute and it matches the name ignoring case, the value is returned
|
||||||
|
unchanged. Otherwise the name and value are passed to header_factory
|
||||||
|
method, and the resulting custom header object is returned as the
|
||||||
|
value. In this case a ValueError is raised if the input value contains
|
||||||
|
CR or LF characters.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if hasattr(value, 'name') and value.name.lower() == name.lower():
|
||||||
|
return (name, value)
|
||||||
|
if isinstance(value, str) and len(value.splitlines())>1:
|
||||||
|
raise ValueError("Header values may not contain linefeed "
|
||||||
|
"or carriage return characters")
|
||||||
|
return (name, self.header_factory(name, value))
|
||||||
|
|
||||||
|
def header_fetch_parse(self, name, value):
|
||||||
|
"""+
|
||||||
|
If the value has a 'name' attribute, it is returned to unmodified.
|
||||||
|
Otherwise the name and the value with any linesep characters removed
|
||||||
|
are passed to the header_factory method, and the resulting custom
|
||||||
|
header object is returned. Any surrogateescaped bytes get turned
|
||||||
|
into the unicode unknown-character glyph.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if hasattr(value, 'name'):
|
||||||
|
return value
|
||||||
|
return self.header_factory(name, ''.join(value.splitlines()))
|
||||||
|
|
||||||
|
def fold(self, name, value):
|
||||||
|
"""+
|
||||||
|
Header folding is controlled by the refold_source policy setting. A
|
||||||
|
value is considered to be a 'source value' if and only if it does not
|
||||||
|
have a 'name' attribute (having a 'name' attribute means it is a header
|
||||||
|
object of some sort). If a source value needs to be refolded according
|
||||||
|
to the policy, it is converted into a custom header object by passing
|
||||||
|
the name and the value with any linesep characters removed to the
|
||||||
|
header_factory method. Folding of a custom header object is done by
|
||||||
|
calling its fold method with the current policy.
|
||||||
|
|
||||||
|
Source values are split into lines using splitlines. If the value is
|
||||||
|
not to be refolded, the lines are rejoined using the linesep from the
|
||||||
|
policy and returned. The exception is lines containing non-ascii
|
||||||
|
binary data. In that case the value is refolded regardless of the
|
||||||
|
refold_source setting, which causes the binary data to be CTE encoded
|
||||||
|
using the unknown-8bit charset.
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self._fold(name, value, refold_binary=True)
|
||||||
|
|
||||||
|
def fold_binary(self, name, value):
|
||||||
|
"""+
|
||||||
|
The same as fold if cte_type is 7bit, except that the returned value is
|
||||||
|
bytes.
|
||||||
|
|
||||||
|
If cte_type is 8bit, non-ASCII binary data is converted back into
|
||||||
|
bytes. Headers with binary data are not refolded, regardless of the
|
||||||
|
refold_header setting, since there is no way to know whether the binary
|
||||||
|
data consists of single byte characters or multibyte characters.
|
||||||
|
|
||||||
|
"""
|
||||||
|
folded = self._fold(name, value, refold_binary=self.cte_type=='7bit')
|
||||||
|
return folded.encode('ascii', 'surrogateescape')
|
||||||
|
|
||||||
|
def _fold(self, name, value, refold_binary=False):
|
||||||
|
if hasattr(value, 'name'):
|
||||||
|
return value.fold(policy=self)
|
||||||
|
maxlen = self.max_line_length if self.max_line_length else float('inf')
|
||||||
|
lines = value.splitlines()
|
||||||
|
refold = (self.refold_source == 'all' or
|
||||||
|
self.refold_source == 'long' and
|
||||||
|
(lines and len(lines[0])+len(name)+2 > maxlen or
|
||||||
|
any(len(x) > maxlen for x in lines[1:])))
|
||||||
|
if refold or refold_binary and _has_surrogates(value):
|
||||||
|
return self.header_factory(name, ''.join(lines)).fold(policy=self)
|
||||||
|
return name + ': ' + self.linesep.join(lines) + self.linesep
|
||||||
|
|
||||||
|
|
||||||
|
default = EmailPolicy()
|
||||||
|
# Make the default policy use the class default header_factory
|
||||||
|
del default.header_factory
|
||||||
|
strict = default.clone(raise_on_defect=True)
|
||||||
|
SMTP = default.clone(linesep='\r\n')
|
||||||
|
HTTP = default.clone(linesep='\r\n', max_line_length=None)
|
326
venv/Lib/site-packages/future/backports/email/quoprimime.py
Normal file
326
venv/Lib/site-packages/future/backports/email/quoprimime.py
Normal file
@ -0,0 +1,326 @@
|
|||||||
|
# Copyright (C) 2001-2006 Python Software Foundation
|
||||||
|
# Author: Ben Gertzfield
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
|
||||||
|
|
||||||
|
This module handles the content transfer encoding method defined in RFC 2045
|
||||||
|
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
|
||||||
|
safely encode text that is in a character set similar to the 7-bit US ASCII
|
||||||
|
character set, but that includes some 8-bit characters that are normally not
|
||||||
|
allowed in email bodies or headers.
|
||||||
|
|
||||||
|
Quoted-printable is very space-inefficient for encoding binary files; use the
|
||||||
|
email.base64mime module for that instead.
|
||||||
|
|
||||||
|
This module provides an interface to encode and decode both headers and bodies
|
||||||
|
with quoted-printable encoding.
|
||||||
|
|
||||||
|
RFC 2045 defines a method for including character set information in an
|
||||||
|
`encoded-word' in a header. This method is commonly used for 8-bit real names
|
||||||
|
in To:/From:/Cc: etc. fields, as well as Subject: lines.
|
||||||
|
|
||||||
|
This module does not do the line wrapping or end-of-line character
|
||||||
|
conversion necessary for proper internationalized headers; it only
|
||||||
|
does dumb encoding and decoding. To deal with the various line
|
||||||
|
wrapping issues, use the email.header module.
|
||||||
|
"""
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future.builtins import bytes, chr, dict, int, range, super
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'body_decode',
|
||||||
|
'body_encode',
|
||||||
|
'body_length',
|
||||||
|
'decode',
|
||||||
|
'decodestring',
|
||||||
|
'header_decode',
|
||||||
|
'header_encode',
|
||||||
|
'header_length',
|
||||||
|
'quote',
|
||||||
|
'unquote',
|
||||||
|
]
|
||||||
|
|
||||||
|
import re
|
||||||
|
import io
|
||||||
|
|
||||||
|
from string import ascii_letters, digits, hexdigits
|
||||||
|
|
||||||
|
CRLF = '\r\n'
|
||||||
|
NL = '\n'
|
||||||
|
EMPTYSTRING = ''
|
||||||
|
|
||||||
|
# Build a mapping of octets to the expansion of that octet. Since we're only
|
||||||
|
# going to have 256 of these things, this isn't terribly inefficient
|
||||||
|
# space-wise. Remember that headers and bodies have different sets of safe
|
||||||
|
# characters. Initialize both maps with the full expansion, and then override
|
||||||
|
# the safe bytes with the more compact form.
|
||||||
|
_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256))
|
||||||
|
_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy()
|
||||||
|
|
||||||
|
# Safe header bytes which need no encoding.
|
||||||
|
for c in bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')):
|
||||||
|
_QUOPRI_HEADER_MAP[c] = chr(c)
|
||||||
|
# Headers have one other special encoding; spaces become underscores.
|
||||||
|
_QUOPRI_HEADER_MAP[ord(' ')] = '_'
|
||||||
|
|
||||||
|
# Safe body bytes which need no encoding.
|
||||||
|
for c in bytes(b' !"#$%&\'()*+,-./0123456789:;<>'
|
||||||
|
b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
|
||||||
|
b'abcdefghijklmnopqrstuvwxyz{|}~\t'):
|
||||||
|
_QUOPRI_BODY_MAP[c] = chr(c)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Helpers
|
||||||
|
def header_check(octet):
    """True when *octet* must be =XX-escaped in a header.

    An octet is safe exactly when its table entry is the literal
    character itself; anything mapped to an expansion (or '_') must be
    escaped.
    """
    mapped = _QUOPRI_HEADER_MAP[octet]
    return mapped != chr(octet)
|
||||||
|
|
||||||
|
|
||||||
|
def body_check(octet):
    """True when *octet* must be =XX-escaped in a body.

    Mirrors header_check(), but consults the (larger) body-safe table.
    """
    mapped = _QUOPRI_BODY_MAP[octet]
    return mapped != chr(octet)
|
||||||
|
|
||||||
|
|
||||||
|
def header_length(bytearray):
    """Return a header quoted-printable encoding length.

    Note that this does not include any RFC 2047 chrome added by
    `header_encode()`.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for headers.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_HEADER_MAP[octet])
    return total
|
||||||
|
|
||||||
|
|
||||||
|
def body_length(bytearray):
    """Return a body quoted-printable encoding length.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for bodies.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_BODY_MAP[octet])
    return total
|
||||||
|
|
||||||
|
|
||||||
|
def _max_append(L, s, maxlen, extra=''):
|
||||||
|
if not isinstance(s, str):
|
||||||
|
s = chr(s)
|
||||||
|
if not L:
|
||||||
|
L.append(s.lstrip())
|
||||||
|
elif len(L[-1]) + len(s) <= maxlen:
|
||||||
|
L[-1] += extra + s
|
||||||
|
else:
|
||||||
|
L.append(s.lstrip())
|
||||||
|
|
||||||
|
|
||||||
|
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab."""
    hexpair = s[1:3]
    return chr(int(hexpair, 16))
|
||||||
|
|
||||||
|
|
||||||
|
def quote(c):
    """Return the '=XX' quoted-printable escape for character *c*."""
    return '={:02X}'.format(ord(c))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use in the RFC 2046 header.  It
    defaults to iso-8859-1.
    """
    # Empty input encodes to the empty string, with no RFC chrome.
    if not header_bytes:
        return ''
    # Map every octet through the header table, then wrap the payload in
    # the '=?charset?q?...?=' encoded-word syntax.
    payload = EMPTYSTRING.join(
        _QUOPRI_HEADER_MAP[octet] for octet in header_bytes)
    return '=?%s?q?%s?=' % (charset, payload)
|
||||||
|
|
||||||
|
|
||||||
|
class _body_accumulator(io.StringIO):
|
||||||
|
|
||||||
|
def __init__(self, maxlinelen, eol, *args, **kw):
|
||||||
|
super().__init__(*args, **kw)
|
||||||
|
self.eol = eol
|
||||||
|
self.maxlinelen = self.room = maxlinelen
|
||||||
|
|
||||||
|
def write_str(self, s):
|
||||||
|
"""Add string s to the accumulated body."""
|
||||||
|
self.write(s)
|
||||||
|
self.room -= len(s)
|
||||||
|
|
||||||
|
def newline(self):
|
||||||
|
"""Write eol, then start new line."""
|
||||||
|
self.write_str(self.eol)
|
||||||
|
self.room = self.maxlinelen
|
||||||
|
|
||||||
|
def write_soft_break(self):
|
||||||
|
"""Write a soft break, then start a new line."""
|
||||||
|
self.write_str('=')
|
||||||
|
self.newline()
|
||||||
|
|
||||||
|
def write_wrapped(self, s, extra_room=0):
|
||||||
|
"""Add a soft line break if needed, then write s."""
|
||||||
|
if self.room < len(s) + extra_room:
|
||||||
|
self.write_soft_break()
|
||||||
|
self.write_str(s)
|
||||||
|
|
||||||
|
def write_char(self, c, is_last_char):
|
||||||
|
if not is_last_char:
|
||||||
|
# Another character follows on this line, so we must leave
|
||||||
|
# extra room, either for it or a soft break, and whitespace
|
||||||
|
# need not be quoted.
|
||||||
|
self.write_wrapped(c, extra_room=1)
|
||||||
|
elif c not in ' \t':
|
||||||
|
# For this and remaining cases, no more characters follow,
|
||||||
|
# so there is no need to reserve extra room (since a hard
|
||||||
|
# break will immediately follow).
|
||||||
|
self.write_wrapped(c)
|
||||||
|
elif self.room >= 3:
|
||||||
|
# It's a whitespace character at end-of-line, and we have room
|
||||||
|
# for the three-character quoted encoding.
|
||||||
|
self.write(quote(c))
|
||||||
|
elif self.room == 2:
|
||||||
|
# There's room for the whitespace character and a soft break.
|
||||||
|
self.write(c)
|
||||||
|
self.write_soft_break()
|
||||||
|
else:
|
||||||
|
# There's room only for a soft break. The quoted whitespace
|
||||||
|
# will be the only content on the subsequent line.
|
||||||
|
self.write_soft_break()
|
||||||
|
self.write(quote(c))
|
||||||
|
|
||||||
|
|
||||||
|
def body_encode(body, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters before the
    eol string (maxlinelen defaults to 76 characters, the maximum value
    permitted by RFC 2045).  Long lines will have the 'soft line break'
    quoted-printable character "=" appended to them, so the decoded text will
    be identical to the original text.

    The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
    followed by a soft line break.  Smaller values will generate a
    ValueError.

    """

    if maxlinelen < 4:
        raise ValueError("maxlinelen must be at least 4")
    if not body:
        # Returns body unchanged, preserving its type ('' or None-ish).
        return body

    # The last line may or may not end in eol, but all other lines do.
    last_has_eol = (body[-1] in '\r\n')

    # This accumulator will make it easier to build the encoded body.
    encoded_body = _body_accumulator(maxlinelen, eol)

    lines = body.splitlines()
    last_line_no = len(lines) - 1
    for line_no, line in enumerate(lines):
        last_char_index = len(line) - 1
        for i, c in enumerate(line):
            # Unsafe octets are replaced by their '=XX' escape; safe ones
            # pass through (and may still trigger a wrap).
            if body_check(ord(c)):
                c = quote(c)
            # The accumulator must know whether this is the line's final
            # character so it can handle trailing whitespace specially.
            encoded_body.write_char(c, i==last_char_index)
        # Add an eol if input line had eol.  All input lines have eol except
        # possibly the last one.
        if line_no < last_line_no or last_has_eol:
            encoded_body.newline()

    return encoded_body.getvalue()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# BAW: I'm not sure if the intent was for the signature of this function to be
|
||||||
|
# the same as base64MIME.decode() or not...
|
||||||
|
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        # Trailing whitespace on an encoded line is insignificant (RFC 2045
        # says transport may have added it).
        line = line.rstrip()
        if not line:
            # A blank input line decodes to a bare line separator.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                # The `continue' also skips the end-of-line check below,
                # which is exactly how a soft break suppresses the eol.
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    # NOTE(review): this trims a single character even when eol is
    # multi-character (e.g. '\r\n') -- matches upstream behavior.
    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
|
||||||
|
|
||||||
|
|
||||||
|
# For convenience and backwards compatibility w/ standard base64 module
|
||||||
|
body_decode = decode
|
||||||
|
decodestring = decode
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab."""
    return unquote(match.group(0))
|
||||||
|
|
||||||
|
|
||||||
|
# Header decoding is done a bit differently
|
||||||
|
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In Q encoding, underscores stand for spaces.
    s = s.replace('_', ' ')
    # Bug fix: re.ASCII was previously passed as re.sub's positional
    # *count* argument (silently limiting substitutions to 256) instead
    # of as *flags*; it must be passed by keyword.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII)
|
400
venv/Lib/site-packages/future/backports/email/utils.py
Normal file
400
venv/Lib/site-packages/future/backports/email/utils.py
Normal file
@ -0,0 +1,400 @@
|
|||||||
|
# Copyright (C) 2001-2010 Python Software Foundation
|
||||||
|
# Author: Barry Warsaw
|
||||||
|
# Contact: email-sig@python.org
|
||||||
|
|
||||||
|
"""Miscellaneous utilities."""
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from future import utils
|
||||||
|
from future.builtins import bytes, int, str
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'collapse_rfc2231_value',
|
||||||
|
'decode_params',
|
||||||
|
'decode_rfc2231',
|
||||||
|
'encode_rfc2231',
|
||||||
|
'formataddr',
|
||||||
|
'formatdate',
|
||||||
|
'format_datetime',
|
||||||
|
'getaddresses',
|
||||||
|
'make_msgid',
|
||||||
|
'mktime_tz',
|
||||||
|
'parseaddr',
|
||||||
|
'parsedate',
|
||||||
|
'parsedate_tz',
|
||||||
|
'parsedate_to_datetime',
|
||||||
|
'unquote',
|
||||||
|
]
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
if utils.PY2:
|
||||||
|
re.ASCII = 0
|
||||||
|
import time
|
||||||
|
import base64
|
||||||
|
import random
|
||||||
|
import socket
|
||||||
|
from future.backports import datetime
|
||||||
|
from future.backports.urllib.parse import quote as url_quote, unquote as url_unquote
|
||||||
|
import warnings
|
||||||
|
from io import StringIO
|
||||||
|
|
||||||
|
from future.backports.email._parseaddr import quote
|
||||||
|
from future.backports.email._parseaddr import AddressList as _AddressList
|
||||||
|
from future.backports.email._parseaddr import mktime_tz
|
||||||
|
|
||||||
|
from future.backports.email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
|
||||||
|
|
||||||
|
from quopri import decodestring as _qdecode
|
||||||
|
|
||||||
|
# Intrapackage imports
|
||||||
|
from future.backports.email.encoders import _bencode, _qencode
|
||||||
|
from future.backports.email.charset import Charset
|
||||||
|
|
||||||
|
COMMASPACE = ', '
|
||||||
|
EMPTYSTRING = ''
|
||||||
|
UEMPTYSTRING = ''
|
||||||
|
CRLF = '\r\n'
|
||||||
|
TICK = "'"
|
||||||
|
|
||||||
|
specialsre = re.compile(r'[][\\()<>@,:;".]')
|
||||||
|
escapesre = re.compile(r'[\\"]')
|
||||||
|
|
||||||
|
# How to figure out if we are processing strings that come from a byte
|
||||||
|
# source with undecodable characters.
|
||||||
|
_has_surrogates = re.compile(
|
||||||
|
'([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search
|
||||||
|
|
||||||
|
# How to deal with a string containing bytes before handing it to the
|
||||||
|
# application through the 'normal' interface.
|
||||||
|
def _sanitize(string):
|
||||||
|
# Turn any escaped bytes into unicode 'unknown' char.
|
||||||
|
original_bytes = string.encode('ascii', 'surrogateescape')
|
||||||
|
return original_bytes.decode('ascii', 'replace')
|
||||||
|
|
||||||
|
|
||||||
|
# Helpers
|
||||||
|
|
||||||
|
def formataddr(pair, charset='utf-8'):
    """The inverse of parseaddr(): render a (realname, email) 2-tuple.

    Takes a 2-tuple of the form (realname, email_address) and returns the
    string value suitable for an RFC 2822 From, To or Cc header.

    If the first element of pair is false, then the second element is
    returned unmodified.

    Optional charset if given is the character set that is used to encode
    realname in case realname is not ASCII safe.  Can be an instance of str
    or a Charset-like object which has a header_encode method.  Default is
    'utf-8'.
    """
    name, address = pair
    # The address MUST (per RFC) be ascii, so raise a UnicodeError if it
    # isn't.
    address.encode('ascii')
    if not name:
        return address
    try:
        name.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII real name: emit it as an RFC 2047 encoded word.
        if isinstance(charset, str):
            charset = Charset(charset)
        encoded_name = charset.header_encode(name)
        return "%s <%s>" % (encoded_name, address)
    # ASCII real name: wrap in double quotes when it contains specials,
    # backslash-escaping any embedded quotes/backslashes.
    wrap = '"' if specialsre.search(name) else ''
    escaped = escapesre.sub(r'\\\g<0>', name)
    return '%s%s%s <%s>' % (wrap, escaped, wrap, address)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    joined = COMMASPACE.join(fieldvalues)
    return _AddressList(joined).addresslist
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
ecre = re.compile(r'''
|
||||||
|
=\? # literal =?
|
||||||
|
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
|
||||||
|
\? # literal ?
|
||||||
|
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
|
||||||
|
\? # literal ?
|
||||||
|
(?P<atom>.*?) # non-greedy up to the next ?= is the atom
|
||||||
|
\?= # literal ?=
|
||||||
|
''', re.VERBOSE | re.IGNORECASE)
|
||||||
|
|
||||||
|
|
||||||
|
def _format_timetuple_and_zone(timetuple, zone):
|
||||||
|
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
|
||||||
|
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
|
||||||
|
timetuple[2],
|
||||||
|
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
|
||||||
|
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
|
||||||
|
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
|
||||||
|
zone)
|
||||||
|
|
||||||
|
def formatdate(timeval=None, localtime=False, usegmt=False):
|
||||||
|
"""Returns a date string as specified by RFC 2822, e.g.:
|
||||||
|
|
||||||
|
Fri, 09 Nov 2001 01:08:47 -0000
|
||||||
|
|
||||||
|
Optional timeval if given is a floating point time value as accepted by
|
||||||
|
gmtime() and localtime(), otherwise the current time is used.
|
||||||
|
|
||||||
|
Optional localtime is a flag that when True, interprets timeval, and
|
||||||
|
returns a date relative to the local timezone instead of UTC, properly
|
||||||
|
taking daylight savings time into account.
|
||||||
|
|
||||||
|
Optional argument usegmt means that the timezone is written out as
|
||||||
|
an ascii string, not numeric one (so "GMT" instead of "+0000"). This
|
||||||
|
is needed for HTTP, and is only used when localtime==False.
|
||||||
|
"""
|
||||||
|
# Note: we cannot use strftime() because that honors the locale and RFC
|
||||||
|
# 2822 requires that day and month names be the English abbreviations.
|
||||||
|
if timeval is None:
|
||||||
|
timeval = time.time()
|
||||||
|
if localtime:
|
||||||
|
now = time.localtime(timeval)
|
||||||
|
# Calculate timezone offset, based on whether the local zone has
|
||||||
|
# daylight savings time, and whether DST is in effect.
|
||||||
|
if time.daylight and now[-1]:
|
||||||
|
offset = time.altzone
|
||||||
|
else:
|
||||||
|
offset = time.timezone
|
||||||
|
hours, minutes = divmod(abs(offset), 3600)
|
||||||
|
# Remember offset is in seconds west of UTC, but the timezone is in
|
||||||
|
# minutes east of UTC, so the signs differ.
|
||||||
|
if offset > 0:
|
||||||
|
sign = '-'
|
||||||
|
else:
|
||||||
|
sign = '+'
|
||||||
|
zone = '%s%02d%02d' % (sign, hours, minutes // 60)
|
||||||
|
else:
|
||||||
|
now = time.gmtime(timeval)
|
||||||
|
# Timezone offset is always -0000
|
||||||
|
if usegmt:
|
||||||
|
zone = 'GMT'
|
||||||
|
else:
|
||||||
|
zone = '-0000'
|
||||||
|
return _format_timetuple_and_zone(now, zone)
|
||||||
|
|
||||||
|
def format_datetime(dt, usegmt=False):
|
||||||
|
"""Turn a datetime into a date string as specified in RFC 2822.
|
||||||
|
|
||||||
|
If usegmt is True, dt must be an aware datetime with an offset of zero. In
|
||||||
|
this case 'GMT' will be rendered instead of the normal +0000 required by
|
||||||
|
RFC2822. This is to support HTTP headers involving date stamps.
|
||||||
|
"""
|
||||||
|
now = dt.timetuple()
|
||||||
|
if usegmt:
|
||||||
|
if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
|
||||||
|
raise ValueError("usegmt option requires a UTC datetime")
|
||||||
|
zone = 'GMT'
|
||||||
|
elif dt.tzinfo is None:
|
||||||
|
zone = '-0000'
|
||||||
|
else:
|
||||||
|
zone = dt.strftime("%z")
|
||||||
|
return _format_timetuple_and_zone(now, zone)
|
||||||
|
|
||||||
|
|
||||||
|
def make_msgid(idstring=None, domain=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.  Optional domain if given provides the
    portion of the message id after the '@'.  It defaults to the locally
    defined hostname.
    """
    # Timestamp + pid + random number keep ids unique in practice.
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    pid = os.getpid()
    randint = random.randrange(100000)
    idstring = '' if idstring is None else '.' + idstring
    if domain is None:
        domain = socket.getfqdn()
    return '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain)
|
||||||
|
|
||||||
|
|
||||||
|
def parsedate_to_datetime(data):
    """Parse an RFC 2822 date string into a datetime.

    The result is timezone-aware when the string carried a numeric
    offset, naive otherwise.
    """
    parsed = list(_parsedate_tz(data))
    dtuple, tz = parsed[:-1], parsed[-1]
    if tz is None:
        return datetime.datetime(*dtuple[:6])
    return datetime.datetime(
        *dtuple[:6],
        tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
|
||||||
|
|
||||||
|
|
||||||
|
def parseaddr(addr):
    """Return (realname, email) parsed from *addr*, or ('', '') on failure."""
    parsed = _AddressList(addr).addresslist
    return parsed[0] if parsed else ('', '')
|
||||||
|
|
||||||
|
|
||||||
|
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
|
||||||
|
def unquote(str):
    """Remove one level of surrounding '"..."' or '<...>' quoting.

    Double-quoted strings also have their backslash escapes undone.
    Anything else (including strings of length <= 1) is returned
    unchanged.
    """
    if len(str) <= 1:
        return str
    first, last, inner = str[0], str[-1], str[1:-1]
    if first == '"' and last == '"':
        return inner.replace('\\\\', '\\').replace('\\"', '"')
    if first == '<' and last == '>':
        return inner
    return str
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# RFC2231-related functions - parameter encoding and decoding
|
||||||
|
def decode_rfc2231(s):
    """Decode string according to RFC 2231.

    Returns (charset, language, value); charset and language are None
    when *s* is not in the charset'language'value form.
    """
    charset_lang_value = s.split(TICK, 2)
    if len(charset_lang_value) == 3:
        return charset_lang_value
    return None, None, s
|
||||||
|
|
||||||
|
|
||||||
|
def encode_rfc2231(s, charset=None, language=None):
    """Encode string according to RFC 2231.

    If neither charset nor language is given, then s is returned as-is.  If
    charset is given but not language, the string is encoded using the empty
    string for language.
    """
    # Percent-encode everything unsafe; the declared charset drives the
    # byte encoding (ASCII when none is declared).
    quoted = url_quote(s, safe='', encoding=charset or 'ascii')
    if charset is None and language is None:
        return quoted
    if language is None:
        language = ''
    return "%s'%s'%s" % (charset, language, quoted)
|
||||||
|
|
||||||
|
|
||||||
|
# Matches RFC 2231 continuation parameter names: 'name*', 'name*0',
# 'name*0*' (trailing '*' marks a %-encoded segment).
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
    re.ASCII)

def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is kept verbatim (by convention the Content-Type /
    # Content-Disposition value itself).
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' on the name flags a %-encoded segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            # Continuation segment: collect under the base name for later
            # reassembly in numeric order.
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            # Plain parameter: re-quote and emit immediately.
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # Decode as "latin-1", so the characters in s directly
                    # represent the percent-encoded octet values.
                    # collapse_rfc2231_value treats this as an octet sequence.
                    s = url_unquote(s, encoding="latin-1")
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                # Extended parameter: split back into its
                # (charset, language, value) triple.
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
|
||||||
|
|
||||||
|
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    """Convert an RFC 2231 (charset, language, text) triple into a string.

    A *value* that is not such a triple is simply unquoted.  Otherwise
    the text is reinterpreted as raw character bytes and decoded with its
    declared charset, falling back to the unquoted text when the charset
    is not a known codec.
    """
    if not (isinstance(value, tuple) and len(value) == 3):
        return unquote(value)
    charset, language, text = value
    # While text comes to us as a unicode string, we need it as bytes --
    # and not via the normal utf-8 codec, but as a straight interpretation
    # of the string's code points as octet values.
    rawbytes = bytes(text, 'raw-unicode-escape')
    try:
        return str(rawbytes, charset, errors)
    except LookupError:
        # charset is not a known codec.
        return unquote(text)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# datetime doesn't provide a localtime function yet, so provide one. Code
|
||||||
|
# adapted from the patch in issue 9527. This may not be perfect, but it is
|
||||||
|
# better than not having it.
|
||||||
|
#
|
||||||
|
|
||||||
|
def localtime(dt=None, isdst=-1):
    """Return local time as an aware datetime object.

    If called without arguments, return current time.  Otherwise *dt*
    argument should be a datetime instance, and it is converted to the
    local time zone according to the system time zone database.  If *dt* is
    naive (that is, dt.tzinfo is None), it is assumed to be in local time.
    In this case, a positive or zero value for *isdst* causes localtime to
    presume initially that summer time (for example, Daylight Saving Time)
    is or is not (respectively) in effect for the specified time.  A
    negative value for *isdst* causes the localtime() function to attempt
    to divine whether summer time is in effect for the specified time.

    """
    if dt is None:
        return datetime.datetime.now(datetime.timezone.utc).astimezone()
    if dt.tzinfo is not None:
        # Already aware: just shift to the local zone.
        return dt.astimezone()
    # We have a naive datetime.  Convert to a (localtime) timetuple and pass to
    # system mktime together with the isdst hint.  System mktime will return
    # seconds since epoch.
    tm = dt.timetuple()[:-1] + (isdst,)
    seconds = time.mktime(tm)
    localtm = time.localtime(seconds)
    try:
        # Platforms exposing tm_gmtoff/tm_zone (e.g. glibc) give us the
        # offset and zone name directly.
        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
        tz = datetime.timezone(delta, localtm.tm_zone)
    except AttributeError:
        # Compute UTC offset and compare with the value implied by tm_isdst.
        # If the values match, use the zone name implied by tm_isdst.
        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
        dst = time.daylight and localtm.tm_isdst > 0
        gmtoff = -(time.altzone if dst else time.timezone)
        if delta == datetime.timedelta(seconds=gmtoff):
            tz = datetime.timezone(delta, time.tzname[dst])
        else:
            # Offsets disagree (e.g. the zone database changed); keep the
            # computed offset but leave the zone unnamed.
            tz = datetime.timezone(delta)
    return dt.replace(tzinfo=tz)
|
27
venv/Lib/site-packages/future/backports/html/__init__.py
Normal file
27
venv/Lib/site-packages/future/backports/html/__init__.py
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
"""
|
||||||
|
General functions for HTML manipulation, backported from Py3.
|
||||||
|
|
||||||
|
Note that this uses Python 2.7 code with the corresponding Python 3
|
||||||
|
module names and locations.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
|
||||||
|
# Bug fix: the HTML entity replacement strings were lost in a rendering
# pass (the maps showed identity characters and the invalid token ''');
# restored to the standard html-module entity expansions.
_escape_map = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;'}
_escape_map_full = {ord('&'): '&amp;', ord('<'): '&lt;', ord('>'): '&gt;',
                    ord('"'): '&quot;', ord('\''): '&#x27;'}

# NB: this is a candidate for a bytes/string polymorphic interface

def escape(s, quote=True):
    """
    Replace special characters "&", "<" and ">" to HTML-safe sequences.
    If the optional flag quote is true (the default), the quotation mark
    characters, both double quote (") and single quote (') characters are also
    translated.
    """
    assert not isinstance(s, bytes), 'Pass a unicode string'
    # str.translate does the whole substitution in one C-level pass.
    if quote:
        return s.translate(_escape_map_full)
    return s.translate(_escape_map)
|
2514
venv/Lib/site-packages/future/backports/html/entities.py
Normal file
2514
venv/Lib/site-packages/future/backports/html/entities.py
Normal file
File diff suppressed because it is too large
Load Diff
536
venv/Lib/site-packages/future/backports/html/parser.py
Normal file
536
venv/Lib/site-packages/future/backports/html/parser.py
Normal file
@ -0,0 +1,536 @@
|
|||||||
|
"""A parser for HTML and XHTML.
|
||||||
|
|
||||||
|
Backported for python-future from Python 3.3.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# This file is based on sgmllib.py, but the API is slightly different.
|
||||||
|
|
||||||
|
# XXX There should be a way to distinguish between PCDATA (parsed
|
||||||
|
# character data -- the normal case), RCDATA (replaceable character
|
||||||
|
# data -- only char and entity references and end tags are special)
|
||||||
|
# and CDATA (character data -- only end tags are special).
|
||||||
|
|
||||||
|
from __future__ import (absolute_import, division,
|
||||||
|
print_function, unicode_literals)
|
||||||
|
from future.builtins import *
|
||||||
|
from future.backports import _markupbase
|
||||||
|
import re
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
# Regular expressions used for parsing
|
||||||
|
|
||||||
|
interesting_normal = re.compile('[&<]')
|
||||||
|
incomplete = re.compile('&[a-zA-Z#]')
|
||||||
|
|
||||||
|
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
|
||||||
|
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
|
||||||
|
|
||||||
|
starttagopen = re.compile('<[a-zA-Z]')
|
||||||
|
piclose = re.compile('>')
|
||||||
|
commentclose = re.compile(r'--\s*>')
|
||||||
|
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
|
||||||
|
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
|
||||||
|
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
|
||||||
|
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
|
||||||
|
# Note:
|
||||||
|
# 1) the strict attrfind isn't really strict, but we can't make it
|
||||||
|
# correctly strict without breaking backward compatibility;
|
||||||
|
# 2) if you change attrfind remember to update locatestarttagend too;
|
||||||
|
# 3) if you change attrfind and/or locatestarttagend the parser will
|
||||||
|
# explode, so don't do it.
|
||||||
|
attrfind = re.compile(
|
||||||
|
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
|
||||||
|
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
|
||||||
|
attrfind_tolerant = re.compile(
|
||||||
|
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
|
||||||
|
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
|
||||||
|
locatestarttagend = re.compile(r"""
|
||||||
|
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
|
||||||
|
(?:\s+ # whitespace before attribute name
|
||||||
|
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
|
||||||
|
(?:\s*=\s* # value indicator
|
||||||
|
(?:'[^']*' # LITA-enclosed value
|
||||||
|
|\"[^\"]*\" # LIT-enclosed value
|
||||||
|
|[^'\">\s]+ # bare value
|
||||||
|
)
|
||||||
|
)?
|
||||||
|
)
|
||||||
|
)*
|
||||||
|
\s* # trailing whitespace
|
||||||
|
""", re.VERBOSE)
|
||||||
|
locatestarttagend_tolerant = re.compile(r"""
|
||||||
|
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
|
||||||
|
(?:[\s/]* # optional whitespace before attribute name
|
||||||
|
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
|
||||||
|
(?:\s*=+\s* # value indicator
|
||||||
|
(?:'[^']*' # LITA-enclosed value
|
||||||
|
|"[^"]*" # LIT-enclosed value
|
||||||
|
|(?!['"])[^>\s]* # bare value
|
||||||
|
)
|
||||||
|
(?:\s*,)* # possibly followed by a comma
|
||||||
|
)?(?:\s|/(?!>))*
|
||||||
|
)*
|
||||||
|
)?
|
||||||
|
\s* # trailing whitespace
|
||||||
|
""", re.VERBOSE)
|
||||||
|
endendtag = re.compile('>')
|
||||||
|
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
|
||||||
|
# </ and the tag name, so maybe this should be fixed
|
||||||
|
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
|
||||||
|
|
||||||
|
|
||||||
|
class HTMLParseError(Exception):
    """Exception raised for all parse errors.

    Attributes:
        msg    -- required, non-empty description of the error
        lineno -- 1-based line number, or None if unknown
        offset -- 0-based column offset, or None if unknown
    """

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # position is a (lineno, offset) pair; either entry may be None.
        self.lineno, self.offset = position[0], position[1]

    def __str__(self):
        # Build the message incrementally; offset is stored 0-based but
        # reported 1-based ("column %d" % (offset + 1)).
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
|
||||||
|
|
||||||
|
|
||||||
|
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    # Elements whose content is raw character data: inside these,
    # everything up to the matching close tag is treated as text.
    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self, strict=False):
        """Initialize and reset this instance.

        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        is deprecated.
        """
        if strict:
            # Strict mode still works but is on its way out; warn at the
            # caller's frame (stacklevel=2).
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''              # buffered, not-yet-parsed input
        self.lasttag = '???'           # most recent start-tag name seen
        self.interesting = interesting_normal  # regex locating next '<' or '&'
        self.cdata_elem = None         # element name while in CDATA mode
        _markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        # end=1 forces processing of whatever is left as if at EOF.
        self.goahead(1)

    def error(self, message):
        """Raise HTMLParseError at the current input position."""
        raise HTMLParseError(message, self.getpos())

    # Full text of the most recent start tag; None until one is parsed.
    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # Enter raw-text mode: only the matching close tag is "interesting".
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        # Leave raw-text mode and resume normal '<'/'&' scanning.
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i)  # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    # In CDATA mode with no close tag in sight: wait for
                    # more input rather than emitting partial text.
                    break
                j = n
            # Everything before the next markup character is plain data.
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                # Dispatch on what follows '<'; each parse_* returns the
                # index just past the construct, or -1 if incomplete.
                if starttagopen.match(rawdata, i):  # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    # Lone '<' not starting any construct: emit as data.
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    # Construct is incomplete in the buffer.
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    # Tolerant EOF recovery: flush up to the next '>' or
                    # '<' (or a single char) as data.
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        # Reference not terminated by ';': don't consume
                        # the following character.
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]: #bail by consuming &#
                        # NOTE(review): these slices look wrong when i > 0 --
                        # later CPython uses rawdata[i:i+2] / updatepos(i, i+2)
                        # here; confirm before relying on this path.
                        self.handle_data(rawdata[0:2])
                        i = self.updatepos(i, 2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            # Flush any trailing data at EOF (unless stuck in CDATA mode).
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        # Keep only the unconsumed tail for the next feed()/close().
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                        'parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            # Anything else after '<!' is treated as a bogus comment.
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            # The content between '<!'/'</' and '>' is surfaced as a comment.
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2)  # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            # Incomplete tag: wait for more data.
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            # Strict and tolerant modes use different attribute grammars.
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                # Bare attribute with no '=value' part.
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                # Strip matching surrounding quotes.
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Junk between the last attribute and the closing bracket:
            # compute an accurate error position spanning the tag text.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            # Tolerant mode: emit the whole malformed tag as data.
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1)  # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i)  # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                # Inside <script>/<style>: a non-matching end tag is data.
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1

        elem = match.group(1).lower()  # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                # Close tag for some other element while in CDATA mode:
                # treat it as text.
                self.handle_data(rawdata[i:gtpos])
                return gtpos

        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        # Tolerant mode silently ignores unknown declarations.
        if self.strict:
            self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            # Fast path: nothing to unescape.
            return s
        def replaceEntities(s):
            # s is a Match object; group 0 of the capture is the
            # reference body without the leading '&'.
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        # Hexadecimal character reference.
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        # Decimal character reference.
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                # Malformed numeric reference: leave it as-is.
                return '&#' + s
            else:
                # Named entity; imported lazily to avoid a hard
                # module-level dependency.
                from future.backports.html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    # Terminated but unknown: leave untouched.
                    return '&' + s
                # Unterminated: try progressively longer prefixes,
                # keeping any trailing text.
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s

        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s)
|
1346
venv/Lib/site-packages/future/backports/http/client.py
Normal file
1346
venv/Lib/site-packages/future/backports/http/client.py
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user