Merge branch 'master' into fusiontables-migration

Tom Morris 2013-02-26 00:05:34 -05:00
commit 1e9a1c5f15
453 changed files with 179 additions and 197163 deletions

View File: .classpath

@@ -12,7 +12,7 @@
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/ant-tools-1.8.0.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/arithcode-1.1.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/butterfly-trunk.jar" sourcepath="main/webapp/WEB-INF/lib-src/butterfly-trunk-sources.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/clojure-1.1.0.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/clojure-1.4.0.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/commons-codec-1.5.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/commons-collections-3.2.1.jar"/>
<classpathentry exported="true" kind="lib" path="main/webapp/WEB-INF/lib/commons-fileupload-1.2.1.jar"/>
@@ -46,7 +46,7 @@
<classpathentry exported="true" kind="lib" path="server/lib/slf4j-api-1.5.6.jar"/>
<classpathentry exported="true" kind="lib" path="server/lib/slf4j-log4j12-1.5.6.jar"/>
<classpathentry exported="true" kind="lib" path="broker/appengine/WEB-INF/lib/slf4j-jdk14-1.5.6.jar"/>
<classpathentry exported="true" kind="lib" path="extensions/jython/module/MOD-INF/lib/jython-2.5.1.jar"/>
<classpathentry exported="true" kind="lib" path="extensions/jython/module/MOD-INF/lib/jython-2.5.3.jar"/>
<classpathentry exported="true" kind="lib" path="broker/core/module/MOD-INF/lib/bdb-je-4.0.103.jar"/>
<classpathentry exported="true" kind="lib" path="extensions/gdata/module/MOD-INF/lib/gdata-core-1.0.jar"/>
<classpathentry exported="true" kind="lib" path="extensions/gdata/module/MOD-INF/lib/gdata-spreadsheet-3.0.jar"/>

View File: README.txt

@@ -13,7 +13,7 @@ Where can I get more information on ?
Look at the web site
http://github.com/OpenRefine/OpenRefine
http://github.com/OpenRefine/OpenRefine/wiki
Licensing and legal issues

View File: BaseHTTPServer.py (deleted)

@@ -1,578 +0,0 @@
"""HTTP server base class.
Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts). It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.
Contents:
- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
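#
# For illustration, a single access logged in this format might look
# like this (host, path, and byte count invented):
#
# client.example.com - - [26/Feb/2013:00:05:34] "GET /index.html HTTP/1.0" 200 2326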
__version__ = "0.3"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import sys
import time
import socket # For gethostbyaddr()
import mimetools
import SocketServer
# Default error message
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
def _quote_html(html):
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class HTTPServer(SocketServer.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
SocketServer.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification requires that lines be separated by CRLF but,
for compatibility with the widest range of clients, recommends
that servers also handle bare LF. Similarly, whitespace in the
request line is treated sensibly (allowing multiple spaces between
components and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of mimetools.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = "HTTP/0.9" # Default
self.close_connection = 1
requestline = self.raw_requestline
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
short, long = self.responses[code]
except KeyError:
short, long = '???', '???'
if message is None:
message = short
explain = long
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", "text/html")
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content)
error_message_format = DEFAULT_ERROR_MESSAGE
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
# print (self.protocol_version, code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s: %s\r\n" % (keyword, value))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self.wfile.write("\r\n")
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(*args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
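# A subclass that wants its log somewhere else can override this hook;
# a sketch (the log path is hypothetical):
#
#     class FileLogHandler(BaseHTTPRequestHandler):
#         def log_message(self, format, *args):
#             # Same host/timestamp prefix as the default, but appended
#             # to a file instead of written to stderr.
#             with open('/tmp/httpd.log', 'a') as f:
#                 f.write("%s - - [%s] %s\n" %
#                         (self.address_string(),
#                          self.log_date_time_string(),
#                          format % args))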
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# The Message-like class used to parse headers
MessageClass = mimetools.Message
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this server.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
test()
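# As the protocol_version comment above notes, persistent connections
# require HTTP/1.1; a sketch of a handler that opts in (name and body
# are illustrative):

class KeepAliveHandler(BaseHTTPRequestHandler):
    protocol_version = "HTTP/1.1"   # handle() will then loop over requests

    def do_GET(self):
        body = "ok"
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        # With keep-alive the connection stays open, so the client needs
        # an accurate Content-Length to find the end of the body.
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)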

View File: CGIHTTPServer.py (deleted)

@@ -1,362 +0,0 @@
"""CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Return True if self.path requires running a CGI script, False
if not; on a match, self.cgi_info is set to the tuple (dir,
rest), where rest is the part of the path after the directory
name, without its leading slash.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
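# Illustrative trace of the match described above, using the default
# directories:
#     self.path = '/cgi-bin/test.py/extra?x=1'
#     '/cgi-bin' matches and is followed by '/', so is_cgi() returns
#     True and sets self.cgi_info = ('/cgi-bin', 'test.py/extra?x=1')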
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2 or self.have_popen3:
# Windows -- use popen2 or popen3 to create a subprocess
import shutil
if self.have_popen3:
popenx = os.popen3
else:
popenx = os.popen2
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
files = popenx(cmdline, 'b')
fi = files[0]
fo = files[1]
if self.have_popen3:
fe = files[2]
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
fi.close()
shutil.copyfileobj(fo, self.wfile)
if self.have_popen3:
errors = fe.read()
fe.close()
if errors:
self.log_error('%s', errors)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
save_cwd = os.getcwd()
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
os.chdir(save_cwd)
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()

View File: ConfigParser.py (deleted)

@@ -1,640 +0,0 @@
"""Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, option)
like get(), but convert value to an integer
getfloat(section, option)
like get(), but convert value to a float
getboolean(section, option)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
class RawConfigParser:
def __init__(self, defaults=None):
self._sections = {}
self._defaults = {}
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists.
"""
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = {}
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = {}
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key != "__name__":
fp.write("%s = %s\n" %
(key, str(value).replace('\n', '\n\t')))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
Each section in the setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
option lines in `name: value' format.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with '#' or ';',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = {'__name__': sectname}
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e[0])
else:
break
if "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_match = re.compile(r"%\(([^)]+)\)s").match
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value):
"""Set an option. Extend ConfigParser.set: check for string values."""
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
ConfigParser.set(self, section, option, value)

View File: Cookie.py (deleted)

@@ -1,746 +0,0 @@
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
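# As a sketch, a CGI script might round-trip cookies like this (cookie
# name and values invented):
#
#     import os
#     import Cookie
#
#     C = Cookie.SimpleCookie()
#     C.load(os.environ.get('HTTP_COOKIE', ''))  # cookies the browser sent
#     C['session'] = 'abc123'
#     C['session']['path'] = '/'
#     C['session']['max-age'] = 3600
#     print C.output()   # one Set-Cookie: header line per morsel
#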
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\'.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
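# NOTE (editor's addition, not in the original file): the output of
# _getdate() depends on the current clock; only the format is fixed, e.g.
#
#     >>> _getdate(300)                # expires five minutes from now
#     'Sat, 02-Mar-2013 00:05:00 GMT'  # illustrative value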
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
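# NOTE (editor's addition, not in the original file): Morsels are normally
# created for you by BaseCookie, but a standalone sketch looks like this:
#
#     >>> m = Morsel()
#     >>> m.set("sid", "abc123", "abc123")   # key, value, coded_value
#     >>> m["path"] = "/"
#     >>> m.output()
#     'Set-Cookie: sid=abc123; Path=/'
#
# Setting any attribute name outside _reserved raises CookieError.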
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
# end value_decode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
self.update(rawdata)
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
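# NOTE (editor's addition, not in the original file): a round-trip sketch
# of SimpleCookie in both directions.
#
#     >>> C = SimpleCookie()
#     >>> C["fig"] = "newton"
#     >>> C["fig"]["path"] = "/"
#     >>> print C.output()
#     Set-Cookie: fig=newton; Path=/
#     >>> C2 = SimpleCookie("fig=newton")
#     >>> C2["fig"].value
#     'newton'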
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie
class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie
###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################
def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:

@@ -1,306 +0,0 @@
"""Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)
if inspect.ismethod(object):
args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
# exclude the argument bound to the instance, it will be
# confusing to the non-Python user
argspec = inspect.formatargspec (
args[1:],
varargs,
varkw,
defaults,
formatvalue=self.formatvalue
)
elif inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = methods.items()
method_items.sort()
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
return result
class XMLRPCDocGenerator:
"""Generates documentation for an XML-RPC server.
This class is designed as a mix-in and should not
be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if self.funcs.has_key(method_name):
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=1):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation()
print 'Content-Type: text/html'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
if __name__ == '__main__':
def deg_to_rad(deg):
"""deg_to_rad(90) => 1.5707963267948966
Converts an angle in degrees to an angle in radians"""
import math
return deg * math.pi / 180
server = DocXMLRPCServer(("localhost", 8000))
server.set_server_title("Math Server")
server.set_server_name("Math XML-RPC Server")
server.set_server_documentation("""This server supports various mathematical functions.
You can use it from Python as follows:
>>> from xmlrpclib import ServerProxy
>>> s = ServerProxy("http://localhost:8000")
>>> s.deg_to_rad(90.0)
1.5707963267948966""")
server.register_function(deg_to_rad)
server.register_introspection_functions()
server.serve_forever()
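# NOTE (editor's addition, not in the original file): with the demo server
# above running, the generated pydoc page can be fetched with a plain GET;
# POSTs to the same address are still dispatched as XML-RPC calls.
#
#     import urllib
#     html = urllib.urlopen("http://localhost:8000/").read()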

File diff suppressed because it is too large.

@@ -1,369 +0,0 @@
"""A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import markupbase
import re
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
"""Exception raised for all parse errors."""
def __init__(self, msg, position=(None, None)):
assert msg
self.msg = msg
self.lineno = position[0]
self.offset = position[1]
def __str__(self):
result = self.msg
if self.lineno is not None:
result = result + ", at line %d" % self.lineno
if self.offset is not None:
result = result + ", column %d" % (self.offset + 1)
return result
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
markupbase.ParserBase.reset(self)
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if end:
self.error("EOF in middle of construct")
break
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
self.error("malformed start tag")
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
self.error("bad end tag: %r" % (rawdata[i:j],))
tag = match.group(1)
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
s = s.replace("&lt;", "<")
s = s.replace("&gt;", ">")
s = s.replace("&apos;", "'")
s = s.replace("&quot;", '"')
s = s.replace("&amp;", "&") # Must be last
return s
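# NOTE (editor's addition, not in the original file): all handlers above are
# no-ops by default, so a subclass overrides only what it needs. LinkDumper
# and the sample markup are illustrative:
#
#     class LinkDumper(HTMLParser):
#         def handle_starttag(self, tag, attrs):
#             if tag == "a":
#                 for name, value in attrs:
#                     if name == "href":
#                         print value
#
#     p = LinkDumper()
#     p.feed('<a href="http://example.com/">example</a>')
#     p.close()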

@@ -1,181 +0,0 @@
"""Generic MIME writer.
This module defines the class MimeWriter. The MimeWriter class implements
a basic formatter for creating MIME multi-part files. It doesn't seek around
the output file nor does it use large amounts of buffer space. You must write
the parts out in the order that they should occur in the final file.
MimeWriter does buffer the headers you add, allowing you to rearrange their
order.
"""
import mimetools
__all__ = ["MimeWriter"]
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
"""Add a header line to the MIME message.
The key is the name of the header, where the value obviously provides
the value of the header. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to append.
"""
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
"""Writes out and forgets all headers accumulated so far.
This is useful if you don't need a body part at all; for example,
for a subpart of type message/rfc822 that's (mis)used to store some
header-like information.
"""
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
The content-type is set to the provided ctype, and the optional
parameter, plist, provides additional parameters for the
content-type declaration. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to insert at the start.
"""
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.flushheaders()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
Additionally, this method initializes the multi-part code, where the
subtype parameter provides the multipart subtype, the boundary
parameter may provide a user-defined boundary specification, and the
plist parameter provides optional parameters for the subtype. The
optional argument, prefix, determines where the header is inserted;
0 means append at the end, 1 means insert at the start. The default
is to insert at the start. Subparts should be created using the
nextpart() method.
"""
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
"""Returns a new instance of MimeWriter which represents an
individual part in a multipart message.
This may be used to write the part as well as used for creating
recursively complex multipart messages. The message must first be
initialized with the startmultipartbody() method before using the
nextpart() method.
"""
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
"""This is used to designate the last part of a multipart message.
It should always be used when writing multipart messages.
"""
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
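# NOTE (editor's addition, not in the original file): a usage sketch
# following the class docstring's recipe; the StringIO sink is just for
# demonstration.
#
#     from StringIO import StringIO
#     fp = StringIO()
#     w = MimeWriter(fp)
#     w.addheader("MIME-Version", "1.0")
#     body = w.startbody("text/plain")      # flushes headers, returns fp
#     body.write("hello\n")
#     print fp.getvalue()
#
# For multipart output, use startmultipartbody() and one nextpart() per
# subpart, closing with lastpart().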

@@ -1,215 +0,0 @@
"""A multi-producer, multi-consumer queue."""
from time import time as _time
from collections import deque
__all__ = ['Empty', 'Full', 'Queue']
class Empty(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
pass
class Full(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
pass
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=0):
try:
import threading
except ImportError:
import dummy_threading as threading
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; thread waiting to join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
def qsize(self):
"""Return the approximate size of the queue (not reliable!)."""
self.mutex.acquire()
n = self._qsize()
self.mutex.release()
return n
def empty(self):
"""Return True if the queue is empty, False otherwise (not reliable!)."""
self.mutex.acquire()
n = self._empty()
self.mutex.release()
return n
def full(self):
"""Return True if the queue is full, False otherwise (not reliable!)."""
self.mutex.acquire()
n = self._full()
self.mutex.release()
return n
def put(self, item, block=True, timeout=None):
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
"""
self.not_full.acquire()
try:
if not block:
if self._full():
raise Full
elif timeout is None:
while self._full():
self.not_full.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._full():
remaining = endtime - _time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
finally:
self.not_full.release()
def put_nowait(self, item):
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
"""
return self.put(item, False)
def get(self, block=True, timeout=None):
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a positive number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
"""
self.not_empty.acquire()
try:
if not block:
if self._empty():
raise Empty
elif timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
finally:
self.not_empty.release()
def get_nowait(self):
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
"""
return self.get(False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Check whether the queue is empty
def _empty(self):
return not self.queue
# Check whether the queue is full
def _full(self):
return self.maxsize > 0 and len(self.queue) == self.maxsize
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
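# NOTE (editor's addition, not in the original file): a small
# producer/consumer sketch; the worker function is illustrative.
#
#     import threading
#     from Queue import Queue
#
#     q = Queue()
#
#     def worker():
#         while True:
#             item = q.get()          # blocks until an item is available
#             # ... process item ...
#             q.task_done()           # one task_done() per completed get()
#
#     t = threading.Thread(target=worker)
#     t.setDaemon(True)               # Python 2 spelling of daemon=True
#     t.start()
#     for i in range(10):
#         q.put(i)
#     q.join()                        # returns once every item is processed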

@@ -1,218 +0,0 @@
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import urlparse
import cgi
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno()) if hasattr(os, 'fstat') else os.stat(path)
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write("<title>Directory listing for %s</title>\n" % displaypath)
f.write("<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
f.write("</ul>\n<hr>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = urlparse.urlparse(path)[2]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
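# NOTE (editor's addition, not in the original file): the module is normally
# run directly; both forms serve the current directory, and test() reads an
# optional port from the command line (default 8000):
#
#     python SimpleHTTPServer.py
#     python -m SimpleHTTPServer 8080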

@@ -1,595 +0,0 @@
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the strings methods
return list_public_methods(self) + \
['string.' + method for method in list_public_methods(self.string)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise Exception('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
If the optional allow_dotted_names argument is false, dots are not
supported and this function operates similarly to getattr(obj, attr).
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
def list_public_methods(obj):
"""Returns a list of attribute strings, found in the specified
object, which represent callable attributes"""
return [member for member in dir(obj)
if not member.startswith('_') and
callable(getattr(obj, member))]
def remove_duplicates(lst):
"""remove_duplicates([2,2,2,1,3,3]) => [3,1,2]
Returns a copy of a list without duplicates. Every list
item must be hashable and the order of the items in the
resulting list is not defined.
"""
u = {}
for x in lst:
u[x] = 1
return u.keys()
class SimpleXMLRPCDispatcher:
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. There should never be any
reason to instantiate this class directly.
"""
def __init__(self, allow_none, encoding):
self.funcs = {}
self.instance = None
self.allow_none = allow_none
self.encoding = encoding
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches a XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
Enabling the allow_dotted_names options allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function, name = None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
if name is None:
name = function.__name__
self.funcs[name] = function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior.
"""
try:
params, method = xmlrpclib.loads(data)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = xmlrpclib.dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault, fault:
response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except:
# report exception back to server
response = xmlrpclib.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
encoding=self.encoding, allow_none=self.allow_none,
)
return response
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = self.funcs.keys()
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods = remove_duplicates(
methods + self.instance._listMethods()
)
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods = remove_duplicates(
methods + list_public_methods(self.instance)
)
methods.sort()
return methods
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if self.funcs.has_key(method_name):
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
import pydoc
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault, fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except:
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)}
)
return results
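# NOTE (editor's addition, not in the original file): illustrative client
# usage. Assuming the server called register_multicall_functions() and
# exposes 'add' and 'pow', xmlrpclib's MultiCall helper batches the calls:
#
#     import xmlrpclib
#     proxy = xmlrpclib.ServerProxy("http://localhost:8000")
#     mc = xmlrpclib.MultiCall(proxy)
#     mc.add(2, 3)
#     mc.pow(2, 9)
#     print list(mc())           # [5, 512]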
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
func = None
try:
# check to see if a matching function has been registered
func = self.funcs[method]
except KeyError:
if self.instance is not None:
# check for a _dispatch method
if hasattr(self.instance, '_dispatch'):
return self.instance._dispatch(method, params)
else:
# call instance method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
if func is not None:
return func(*params)
else:
raise Exception('method "%s" is not supported' % method)
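# Editor's illustrative sketch (class and port are made-up names): routing
# every call through a custom _dispatch on a registered instance, as the
# docstring above describes.
def _example_instance_dispatch():
    class MathService:
        def _dispatch(self, method, params):
            # intercept every XML-RPC call by name
            if method == 'add':
                return sum(params)
            raise Exception('method "%s" is not supported' % method)
    server = SimpleXMLRPCServer(("localhost", 8001))
    server.register_instance(MathService())
    server.serve_forever()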
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple XML-RPC request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
"""
# Class attribute listing the accessible path components;
# paths not on this list will result in a 404 error.
rpc_paths = ('/', '/RPC2')
def is_rpc_path_valid(self):
if self.rpc_paths:
return self.path in self.rpc_paths
else:
# If .rpc_paths is empty, just assume all paths are legal
return True
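    # Illustrative note: the endpoint can be restricted by overriding the
    # class attribute in a subclass, e.g. (path value is an example):
    #
    #     class RestrictedHandler(SimpleXMLRPCRequestHandler):
    #         rpc_paths = ('/RPC2',)
    #
    #     SimpleXMLRPCServer(("localhost", 8000),
    #                        requestHandler=RestrictedHandler)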
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
L.append(self.rfile.read(chunk_size))
size_remaining -= len(L[-1])
data = ''.join(L)
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None)
)
except: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
self.end_headers()
else:
# got a valid XML RPC response
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
def report_404 (self):
# Report a 404 error
self.send_response(404)
response = 'No such page'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# shut down the connection
self.wfile.flush()
self.connection.shutdown(1)
def log_request(self, code='-', size='-'):
"""Selectively log an accepted request."""
if self.server.logRequests:
BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer,
SimpleXMLRPCDispatcher):
"""Simple XML-RPC server.
Simple XML-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch XML-RPC calls to the functions or instance
installed in the server. Override the _dispatch method inherited
from SimpleXMLRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None):
self.logRequests = logRequests
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
SocketServer.TCPServer.__init__(self, addr, requestHandler)
# [Bug #1222790] If possible, set close-on-exec flag; if a
# method spawns a subprocess, the subprocess shouldn't have
# the listening socket open.
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
"""Simple handler for XML-RPC data passed through CGI."""
def __init__(self, allow_none=False, encoding=None):
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
print 'Content-Type: text/xml'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
def handle_get(self):
"""Handle a single HTTP GET request.
Default implementation indicates an error because
XML-RPC uses the POST method.
"""
code = 400
message, explain = \
BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
{
'code' : code,
'message' : message,
'explain' : explain
}
print 'Status: %d %s' % (code, message)
print 'Content-Type: text/html'
print 'Content-Length: %d' % len(response)
print
sys.stdout.write(response)
def handle_request(self, request_text = None):
"""Handle a single XML-RPC request passed through a CGI post method.
If no XML data is given then it is read from stdin. The resulting
XML-RPC response is printed to stdout along with the correct HTTP
headers.
"""
if request_text is None and \
os.environ.get('REQUEST_METHOD', None) == 'GET':
self.handle_get()
else:
# POST data is normally available through stdin
if request_text is None:
request_text = sys.stdin.read()
self.handle_xmlrpc(request_text)
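# Editor's illustrative sketch: a complete CGI script built on the handler
# above (the registered function is just an example); dropped into cgi-bin/
# this serves XML-RPC over CGI.
def _example_cgi_script():
    handler = CGIXMLRPCRequestHandler()
    handler.register_function(pow)
    handler.handle_request()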
if __name__ == '__main__':
print 'Running XML-RPC server on port 8000'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
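# A matching client-side sketch for the demo server above (run from another
# process; the URL mirrors the address used there):
#
#     import xmlrpclib
#     proxy = xmlrpclib.ServerProxy("http://localhost:8000")
#     print proxy.pow(2, 10)     # -> 1024
#     print proxy.add(3, 4)      # -> 7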

View File

@ -1,588 +0,0 @@
"""Generic socket server classes.
This module tries to capture the various aspects of defining a server:
For socket-based servers:
- address family:
- AF_INET{,6}: IP (Internet Protocol) sockets (default)
- AF_UNIX: Unix domain sockets
- others, e.g. AF_DECNET, are conceivable (see <socket.h>)
- socket type:
- SOCK_STREAM (reliable stream, e.g. TCP)
- SOCK_DGRAM (datagrams, e.g. UDP)
For request-based servers (including socket-based):
- client address verification before further looking at the request
(This is actually a hook for any processing that needs to look
at the request before anything else, e.g. logging)
- how to handle multiple requests:
- synchronous (one request is handled at a time)
- forking (each request is handled by a new process)
- threading (each request is handled by a new thread)
The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server. This is bad class design, but
save some typing. (There's also the issue that a deep class hierarchy
slows down method lookups.)
There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:
+------------+
| BaseServer |
+------------+
|
v
+-----------+ +------------------+
| TCPServer |------->| UnixStreamServer |
+-----------+ +------------------+
|
v
+-----------+ +--------------------+
| UDPServer |------->| UnixDatagramServer |
+-----------+ +--------------------+
Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.
Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes. For
instance, a threading UDP server class is created as follows:
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.
To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method. You can then run
various versions of the service by combining one of the server classes
with your request handler class.
The request handler class must be different for datagram or stream
services. This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.
Of course, you still have to use your head!
For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child). In this case,
you can use a threading server, but you will probably have to use
locks to keep two requests that arrive nearly simultaneously from
applying conflicting changes to the server state.
On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested. Here a threading or forking
server is appropriate.
In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data. This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.
Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request). This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).
Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
and encryption schemes
- Standard framework for select-based multiplexing
XXX Open problems:
- What to do with out-of-band data?
BaseServer:
- split generic "request" functionality out into BaseServer class.
Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
example: read entries from a SQL database (requires overriding
get_request() to return a table entry from the database).
Each entry is then processed by a RequestHandlerClass.
"""
# Author of the BaseServer patch: Luke Kenneth Casson Leighton
# XXX Warning!
# There is a test suite for this module, but it cannot be run by the
# standard regression test.
# To run it manually, run Lib/test/test_socketserver.py.
__version__ = "0.4"
import socket
import sys
import os
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
"StreamRequestHandler","DatagramRequestHandler",
"ThreadingMixIn", "ForkingMixIn"]
if hasattr(socket, "AF_UNIX"):
__all__.extend(["UnixStreamServer","UnixDatagramServer",
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
class BaseServer:
"""Base class for server classes.
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you do not use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- server_close()
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- allow_reuse_address
Instance variables:
- RequestHandlerClass
- socket
"""
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
pass
def serve_forever(self):
"""Handle one request at a time until doomsday."""
while 1:
self.handle_request()
# The distinction between handling, getting, processing and
# finishing a request is fairly arbitrary. Remember:
#
# - handle_request() is the top-level call. It calls
# get_request(), verify_request() and process_request()
# - get_request() is different for stream or datagram sockets
# - process_request() is the place that may fork a new process
# or create a new thread to finish the request
# - finish_request() instantiates the request handler class;
# this constructor will handle the request all by itself
def handle_request(self):
"""Handle one request, possibly blocking."""
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def verify_request(self, request, client_address):
"""Verify the request. May be overridden.
Return True if we should proceed with this request.
"""
return True
def process_request(self, request, client_address):
"""Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.
"""
self.finish_request(request, client_address)
self.close_request(request)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
pass
def finish_request(self, request, client_address):
"""Finish one request by instantiating RequestHandlerClass."""
self.RequestHandlerClass(request, client_address, self)
def close_request(self, request):
"""Called to clean up an individual request."""
pass
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print '-'*40
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc() # XXX But this goes to stderr!
print '-'*40
class TCPServer(BaseServer):
"""Base class for various socket-based server classes.
Defaults to synchronous IP stream (i.e., TCP).
Methods for the caller:
- __init__(server_address, RequestHandlerClass)
- serve_forever()
- handle_request() # if you don't use serve_forever()
- fileno() -> int # for select()
Methods that may be overridden:
- server_bind()
- server_activate()
- get_request() -> request, client_address
- verify_request(request, client_address)
- process_request(request, client_address)
- close_request(request)
- handle_error()
Methods for derived classes:
- finish_request(request, client_address)
Class variables that may be overridden by derived classes or
instances:
- address_family
- socket_type
- request_queue_size (only for stream sockets)
- allow_reuse_address
Instance variables:
- server_address
- RequestHandlerClass
- socket
"""
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 5
allow_reuse_address = False
def __init__(self, server_address, RequestHandlerClass):
"""Constructor. May be extended, do not override."""
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
self.server_bind()
self.server_activate()
def server_bind(self):
"""Called by constructor to bind the socket.
May be overridden.
"""
if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
def server_activate(self):
"""Called by constructor to activate the server.
May be overridden.
"""
self.socket.listen(self.request_queue_size)
def server_close(self):
"""Called to clean-up the server.
May be overridden.
"""
self.socket.close()
def fileno(self):
"""Return socket file number.
Interface required by select().
"""
return self.socket.fileno()
def get_request(self):
"""Get the request and client address from the socket.
May be overridden.
"""
return self.socket.accept()
def close_request(self, request):
"""Called to clean up an individual request."""
request.close()
class UDPServer(TCPServer):
"""UDP server class."""
allow_reuse_address = False
socket_type = socket.SOCK_DGRAM
max_packet_size = 8192
def get_request(self):
data, client_addr = self.socket.recvfrom(self.max_packet_size)
return (data, self.socket), client_addr
def server_activate(self):
# No need to call listen() for UDP.
pass
def close_request(self, request):
# No need to close anything.
pass
class ForkingMixIn:
"""Mix-in class to handle each request in a new process."""
active_children = None
max_children = 40
def collect_children(self):
"""Internal routine to wait for died children."""
while self.active_children:
if len(self.active_children) < self.max_children:
options = os.WNOHANG
else:
# If the maximum number of children are already
# running, block while waiting for a child to exit
options = 0
try:
pid, status = os.waitpid(0, options)
except os.error:
pid = None
if not pid: break
self.active_children.remove(pid)
def process_request(self, request, client_address):
"""Fork a new subprocess to process the request."""
self.collect_children()
pid = os.fork()
if pid:
# Parent process
if self.active_children is None:
self.active_children = []
self.active_children.append(pid)
self.close_request(request)
return
else:
# Child process.
# This must never return, hence os._exit()!
try:
self.finish_request(request, client_address)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
finally:
os._exit(1)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
# Decides how threads will act upon termination of the
# main process
daemon_threads = False
def process_request_thread(self, request, client_address):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
self.close_request(request)
except:
self.handle_error(request, client_address)
self.close_request(request)
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
import threading
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
if self.daemon_threads:
t.setDaemon (1)
t.start()
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
if hasattr(socket, 'AF_UNIX'):
class UnixStreamServer(TCPServer):
address_family = socket.AF_UNIX
class UnixDatagramServer(UDPServer):
address_family = socket.AF_UNIX
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
class BaseRequestHandler:
"""Base class for request handler classes.
This class is instantiated for each request to be handled. The
constructor sets the instance variables request, client_address
and server, and then calls the handle() method. To implement a
specific service, all you need to do is to derive a class which
defines a handle() method.
The handle() method can find the request as self.request, the
client address as self.client_address, and the server (in case it
needs access to per-server information) as self.server. Since a
separate instance is created for each request, the handle() method
can define arbitrary other instance variables.
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
def setup(self):
pass
def handle(self):
pass
def finish(self):
pass
# The following two classes make it possible to use the same service
# class for stream or datagram servers.
# Each class sets up these instance variables:
# - rfile: a file object from which the request is read
# - wfile: a file object to which the reply is written
# When the handle() method returns, wfile is flushed properly
class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = 0
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', self.rbufsize)
self.wfile = self.connection.makefile('wb', self.wbufsize)
def finish(self):
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
self.rfile.close()
class DatagramRequestHandler(BaseRequestHandler):
# XXX Regrettably, I cannot get this working on Linux;
# s.recvfrom() doesn't return a meaningful client address.
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO(self.packet)
self.wfile = StringIO()
def finish(self):
self.socket.sendto(self.wfile.getvalue(), self.client_address)
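# Editor's illustrative datagram counterpart (placeholder port): the same
# handler idiom works for UDP through the rfile/wfile wrappers set up above;
# the buffered reply is sent by finish().
def _example_udp_upper_server():
    class UpperHandler(DatagramRequestHandler):
        def handle(self):
            data = self.rfile.read()          # whole packet as a string
            self.wfile.write(data.upper())    # reply is sent in finish()
    ThreadingUDPServer(("localhost", 9998), UpperHandler).serve_forever()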

View File

@ -1,323 +0,0 @@
r"""File-like objects that read from or write to a string buffer.
This implements (nearly) all stdio methods.
f = StringIO() # ready for writing
f = StringIO(buf) # ready for reading
f.close() # explicitly release resources held
flag = f.isatty() # always false
pos = f.tell() # get current position
f.seek(pos) # set current position
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read() # read until EOF
buf = f.read(n) # read up to n bytes
buf = f.readline() # read until end of line ('\n') or EOF
list = f.readlines()# list of f.readline() results until EOF
f.truncate([size]) # truncate file to at most size (default: current pos)
f.write(buf) # write at current position
f.writelines(list) # for line in list: f.write(line)
f.getvalue() # return whole file's contents as a string
Notes:
- Using a real file is often faster (but less convenient).
- There's also a much faster implementation in C, called cStringIO, but
it's not subclassable.
- fileno() is left unimplemented so that code which uses it triggers
an exception early.
- Seeking far beyond EOF and then writing will insert real null
bytes that occupy space in the buffer.
- There's a simple test set (see end of this file).
"""
try:
from errno import EINVAL
except ImportError:
EINVAL = 22
__all__ = ["StringIO"]
def _complain_ifclosed(closed):
if closed:
raise ValueError, "I/O operation on closed file"
class StringIO:
"""class StringIO([buffer])
When a StringIO object is created, it can be initialized to an existing
string by passing the string to the constructor. If no string is given,
the StringIO will start empty.
The StringIO object can accept either Unicode or 8-bit strings, but
mixing the two may take some care. If both are used, 8-bit strings that
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
a UnicodeError to be raised when getvalue() is called.
"""
def __init__(self, buf = ''):
# Force self.buf to be a string or unicode
if not isinstance(buf, basestring):
buf = str(buf)
self.buf = buf
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = False
self.softspace = 0
def __iter__(self):
return self
def next(self):
"""A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
in a for loop (for example, for line in f: print line), the next()
method is called repeatedly. This method returns the next input line,
or raises StopIteration when EOF is hit.
"""
_complain_ifclosed(self.closed)
r = self.readline()
if not r:
raise StopIteration
return r
def close(self):
"""Free the memory buffer.
"""
if not self.closed:
self.closed = True
del self.buf, self.pos
def isatty(self):
"""Returns False because StringIO objects are not connected to a
tty-like device.
"""
_complain_ifclosed(self.closed)
return False
def seek(self, pos, mode = 0):
"""Set the file's current position.
The mode argument is optional and defaults to 0 (absolute file
positioning); other values are 1 (seek relative to the current
position) and 2 (seek relative to the file's end).
There is no return value.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
"""Return the file's current position."""
_complain_ifclosed(self.closed)
return self.pos
def read(self, n = -1):
"""Read at most size bytes from the file
(less if the read hits EOF before obtaining size bytes).
If the size argument is negative or omitted, read all data until EOF
is reached. The bytes are returned as a string object. An empty
string is returned when EOF is encountered immediately.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if n < 0:
newpos = self.len
else:
newpos = min(self.pos+n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readline(self, length=None):
r"""Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is returned only when EOF is encountered immediately.
Note: Unlike stdio's fgets(), the returned string contains null
characters ('\0') if they occurred in the input.
"""
_complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if i < 0:
newpos = self.len
else:
newpos = i+1
if length is not None:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
def readlines(self, sizehint = 0):
"""Read until EOF using readline() and return a list containing the
lines thus read.
If the optional sizehint argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes are read
(possibly more, to accommodate a final whole line).
"""
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def truncate(self, size=None):
"""Truncate the file's size.
If the optional size argument is present, the file is truncated to
(at most) that size. The size defaults to the current position.
The current file position is not changed unless the position
is beyond the new file size.
If the specified size exceeds the file's current size, the
file remains unchanged.
"""
_complain_ifclosed(self.closed)
if size is None:
size = self.pos
elif size < 0:
raise IOError(EINVAL, "Negative size not allowed")
elif size < self.pos:
self.pos = size
self.buf = self.getvalue()[:size]
self.len = size
def write(self, s):
"""Write a string to the file.
There is no return value.
"""
_complain_ifclosed(self.closed)
if not s: return
# Force s to be a string or unicode
if not isinstance(s, basestring):
s = str(s)
spos = self.pos
slen = self.len
if spos == slen:
self.buflist.append(s)
self.len = self.pos = spos + len(s)
return
if spos > slen:
self.buflist.append('\0'*(spos - slen))
slen = spos
newpos = spos + len(s)
if spos < slen:
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
self.buf = ''
if newpos > slen:
slen = newpos
else:
self.buflist.append(s)
slen = newpos
self.len = slen
self.pos = newpos
def writelines(self, iterable):
"""Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. There
is no return value.
(The name is intended to match readlines(); writelines() does not add
line separators.)
"""
write = self.write
for line in iterable:
write(line)
def flush(self):
"""Flush the internal buffer
"""
_complain_ifclosed(self.closed)
def getvalue(self):
"""
Retrieve the entire contents of the "file" at any time before
the StringIO object's close() method is called.
The StringIO object can accept either Unicode or 8-bit strings,
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
is called.
"""
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
return self.buf
# A little test suite
def test():
import sys
if sys.argv[1:]:
file = sys.argv[1]
else:
file = '/etc/passwd'
lines = open(file, 'r').readlines()
text = open(file, 'r').read()
f = StringIO()
for line in lines[:-2]:
f.write(line)
f.writelines(lines[-2:])
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print 'First line =', repr(f.readline())
print 'Position =', f.tell()
line = f.readline()
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
raise RuntimeError, 'bad result after seek back'
f.seek(len(line2), 1)
list = f.readlines()
line = list[-1]
f.seek(f.tell() - len(line))
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.truncate(length/2)
f.seek(0, 2)
print 'Truncated length =', f.tell()
if f.tell() != length/2:
raise RuntimeError, 'truncate did not adjust length'
f.close()
if __name__ == '__main__':
test()

File diff suppressed because it is too large

View File

@ -1,175 +0,0 @@
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return self.data.has_key(key)
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if not self.has_key(key):
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if not self.has_key(key):
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
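# Editor's illustrative sketch of the DictMixin contract described above:
# supply __getitem__, __setitem__, __delitem__ and keys(), and the mixin
# fills in the rest of the mapping API (class name is made up).
class _ExampleSeqDict(DictMixin):
    """Mapping lookalike stored as two parallel lists."""
    def __init__(self):
        self._keys, self._values = [], []
    def __getitem__(self, key):
        try:
            return self._values[self._keys.index(key)]
        except ValueError:
            raise KeyError(key)
    def __setitem__(self, key, value):
        try:
            self._values[self._keys.index(key)] = value
        except ValueError:
            self._keys.append(key)
            self._values.append(value)
    def __delitem__(self, key):
        try:
            i = self._keys.index(key)
        except ValueError:
            raise KeyError(key)
        del self._keys[i]
        del self._values[i]
    def keys(self):
        return list(self._keys)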

View File

@ -1,85 +0,0 @@
"""A more or less complete user-defined wrapper around list objects."""
class UserList:
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
if isinstance(other, UserList): return other.data
else: return other
def __cmp__(self, other):
return cmp(self.data, self.__cast(other))
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
if isinstance(other, UserList):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
else:
self.data[i:j] = list(other)
def __delslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
else:
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)

View File

@ -1,194 +0,0 @@
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, basestring):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, basestring):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __mod__(self, args):
return self.__class__(self.data % args)
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width, *args):
return self.__class__(self.data.center(width, *args))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def decode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.decode(encoding, errors))
else:
return self.__class__(self.data.decode(encoding))
else:
return self.__class__(self.data.decode())
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width, *args):
return self.__class__(self.data.ljust(width, *args))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
def partition(self, sep):
return self.data.partition(sep)
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width, *args):
return self.__class__(self.data.rjust(width, *args))
def rpartition(self, sep):
return self.data.rpartition(sep)
def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def rsplit(self, sep=None, maxsplit=-1):
return self.data.rsplit(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self, chars=None): return self.__class__(self.data.strip(chars))
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and thereby forgetting to remove (override) the
__hash__ method inherited from UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0:
index += len(self.data)
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, basestring):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, basestring):
self.data += other
else:
self.data += str(other)
return self
def __imul__(self, n):
self.data *= n
return self
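# Editor's quick illustrative check of the behavior the MutableString
# docstring describes:
def _example_mutable_string():
    s = MutableString("spam")
    s[0] = "S"                     # in-place edit; a plain str would raise
    assert str(s) == "Spam"
    try:
        hash(s)
    except TypeError:
        pass                       # mutable, hence deliberately unhashable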
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_as, py = os.path.splitext(called_as)
if '-q' in sys.argv:
from test import test_support
test_support.verbose = 0
__import__('test.test_' + called_as.lower())

View File

@ -1,170 +0,0 @@
"""Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl library, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expired=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not re.search(self.magic_re, magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
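# Editor's illustrative round-trip sketch (file name and URL are
# placeholders); this class is normally reached as cookielib.LWPCookieJar.
def _example_lwp_roundtrip():
    import urllib2
    jar = LWPCookieJar("cookies.lwp")
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    opener.open("http://example.com/")
    jar.save(ignore_discard=True)      # writes "Set-Cookie3:" lines
    jar.load("cookies.lwp")            # and reads them back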

View File

@ -1,149 +0,0 @@
"""Mozilla / Netscape cookie loading / saving."""
import re, time
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT)
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
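# Editor's illustrative sketch (path is a placeholder); the class is
# normally reached as cookielib.MozillaCookieJar. Reads a browser-exported
# cookies.txt file in the format described above.
def _example_read_cookies_txt():
    jar = MozillaCookieJar()
    jar.load("cookies.txt", ignore_discard=True, ignore_expires=True)
    for cookie in jar:
        print cookie.domain, cookie.name, cookie.value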

View File

@ -1,116 +0,0 @@
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Feature instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here. However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(2, 7, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
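# Editor's illustrative use of the compiler_flag attribute described in the
# module docstring: enable true division in dynamically compiled code.
def _example_compiler_flag():
    code = compile("print 1 / 2", "<demo>", "exec",
                   division.compiler_flag)
    exec code                      # prints 0.5 rather than 0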

View File

@ -1,54 +0,0 @@
import com.sun.jna as jna
def get_libc():
return CDLL("c")
typecode_map = {'h': 2, 'H': 2}
class Array(object):
def __init__(self, typecode):
self.typecode = typecode
self.itemsize = typecode_map[typecode]
def __call__(self, size, autofree=False):
if not autofree:
raise Exception
return ArrayInstance(self, size)
class ArrayInstance(object):
def __init__(self, shape, size):
self.shape = shape
self.alloc = jna.Memory(shape.itemsize * size)
def __setitem__(self, index, value):
self.alloc.setShort(index, value)
def __getitem__(self, index):
return self.alloc.getShort(index)
class FuncPtr(object):
def __init__(self, fn, name, argtypes, restype):
self.fn = fn
self.name = name
self.argtypes = argtypes
self.restype = restype
def __call__(self, *args):
container = Array('H')(1, autofree=True)
container[0] = self.fn.invokeInt([i[0] for i in args])
return container
class CDLL(object):
def __init__(self, libname):
self.lib = jna.NativeLibrary.getInstance(libname)
self.cache = dict()
def ptr(self, name, argtypes, restype):
key = (name, tuple(argtypes), restype)
try:
return self.cache[key]
except KeyError:
fn = self.lib.getFunction(name)
fnp = FuncPtr(fn, name, argtypes, restype)
self.cache[key] = fnp
return fnp
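A hedged usage sketch for the JNA-backed wrapper above. It assumes a Jython runtime with JNA on the classpath; the calling convention (each argument is a (value, typecode) pair, and the result comes back in a one-slot 'H' array) is read off the code above rather than documented anywhere:

libc = get_libc()
abs_ptr = libc.ptr('abs', ['i'], 'i')   # argtypes/restype serve only as cache keys
result = abs_ptr((-5, 'i'))             # calls fn.invokeInt([-5]) internally
print result[0]                         # -> 5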

View File

@ -1,451 +0,0 @@
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
from thread import allocate_lock as _thread_allocate_lock
except:
from dummy_thread import allocate_lock as _thread_allocate_lock
__author__ = "Brett Cannon"
__email__ = "brett@python.org"
__all__ = ['strptime']
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
        The locale language is set at the outset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
        The only other possible issue is if someone changed the timezone and
        did not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
        # Add '' to seq, at the front if front is True, else at the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 1999-01-03 (matching the time tuple
            # below) will fall on week 0, since it occurs before the first
            # Monday of the year. Otherwise %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
        # Do not worry about the possibility that time.tzname[0] ==
        # time.tzname[1] and time.daylight; handle that in strptime().
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super(TimeRE, self)
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
        prevents the possibility of a match occurring for a value that is also
        a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
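# Example: pattern() expands each directive into a named group, e.g.
#   TimeRE().pattern('%Y-%m-%d')
# returns
#   (?P<Y>\d\d\d\d)-(?P<m>1[0-2]|0[1-9]|[1-9])-(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])
# and compile() wraps that pattern with re.compile(..., IGNORECASE).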
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
    the week, with week_starts_Mon representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
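# Worked example, consistent with the magic date used elsewhere in this
# module: Jan 1 1999 was a Friday (weekday() == 4), so under %U (week starts
# Sunday) first_weekday = 5 and week_0_length = 2. Week 11 plus a Wednesday
# (shifted day_of_week = 3) gives 1 + (2 + 7 * 10) + 3 = 76, which is indeed
# the day of the year of 1999-03-17.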
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the format string."""
global _TimeRE_cache, _regex_cache
_cache_lock.acquire()
try:
if _getlang() != _TimeRE_cache.locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache.clear()
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError, err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format))
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format)
_regex_cache[format] = format_regex
finally:
_cache_lock.release()
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
    # Default to -1 to signify that the values are not known; not critical
    # to have, though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
# Directives not explicitly handled below:
# c, x, X
        #      handled by being built out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
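A short usage sketch of the strptime() defined above (time.strptime delegates to it); the examples assume an English/C locale:

parsed = strptime("Wed Mar 17 22:44:55 1999")   # module's default format
assert parsed[:6] == (1999, 3, 17, 22, 44, 55)
# An explicit format string works the same way:
assert strptime("17/03/1999", "%d/%m/%Y")[:3] == (1999, 3, 17)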

View File

@ -1,241 +0,0 @@
"""Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
        # note: (args or kw) is parenthesized so that arguments are rejected
        # only when the class does not define its own __init__
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__(self):
import threading
key = object.__getattribute__(self, '_local__key')
try:
threads = list(threading.enumerate())
except:
# If enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up.
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace.
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
from threading import currentThread, RLock

View File

@ -1,961 +0,0 @@
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of a 1-byte length, a string of characters, and 0 or 1
pad byte to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
pass
_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
return sign * f
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
if len(s) > 255:
raise ValueError("string exceeds maximum pstring length")
f.write(chr(len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = long(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = long(fsmant)
_write_short(f, expon)
_write_long(f, himant)
_write_long(f, lomant)
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user though appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
    #           available through the getcompname() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._decomp = None
self._convert = None
self._markers = []
self._soundpos = 0
self._file = Chunk(file)
if self._file.getname() != 'FORM':
raise Error, 'file does not start with FORM id'
formdata = self._file.read(4)
if formdata == 'AIFF':
self._aifc = 0
elif formdata == 'AIFC':
self._aifc = 1
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == 'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == 'FVER':
self._version = _read_ulong(chunk)
elif chunkname == 'MARK':
self._readmark(chunk)
elif chunkname in _skiplist:
pass
else:
raise Error, 'unrecognized chunk type '+chunk.chunkname
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error, 'COMM chunk and/or SSND chunk missing'
if self._aifc and self._decomp:
import cl
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._decomp.SetParams(params)
def __init__(self, f):
if type(f) == type(''):
f = __builtin__.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
if self._decomp:
self._decomp.CloseDecompressor()
self._decomp = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return ''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _decomp_data(self, data):
import cl
dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
len(data) * 2)
return self._decomp.Decompress(len(data) / self._nchannels,
data)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2,
self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) / 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print 'Warning: bad COMM chunk size'
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != 'NONE':
if self._comptype == 'G722':
try:
import audioop
except ImportError:
pass
else:
self._convert = self._adpcm2lin
self._framesize = self._framesize / 4
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._ulaw2lin
self._framesize = self._framesize / 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
self._framesize = self._framesize / 2
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
self._framesize = self._framesize / 2
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print 'Warning: MARK chunk contains only',
print len(self._markers),
if len(self._markers) == 1: print 'marker',
else: print 'markers',
print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if type(f) == type(''):
filename = f
f = __builtin__.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = 'NONE'
self._compname = 'not compressed'
self._comp = None
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error, 'marker ID must be > 0'
if pos < 0:
raise Error, 'marker position must be >= 0'
if type(name) != type(''):
raise Error, 'marker name must be a string'
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) / (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(chr(0))
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
if self._comp:
self._comp.CloseCompressor()
self._comp = None
self._file.flush()
self._file = None
#
# Internal methods.
#
def _comp_data(self, data):
import cl
dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
return self._comp.Compress(self._nframes, data)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2,
self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in ('ULAW', 'ALAW'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
if self._comptype == 'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _init_compression(self):
if self._comptype == 'G722':
self._convert = self._lin2adpcm
return
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._lin2ulaw
return
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._comp = cl.OpenCompressor(scheme)
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate,
cl.FRAME_BUFFER_SIZE, 100,
cl.COMPRESSED_BUFFER_SIZE, 100]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._comp.SetParams(params)
# the compressor produces a header which we ignore
dummy = self._comp.Compress(0, '')
self._convert = self._comp_data
def _write_header(self, initlength):
if self._aifc and self._comptype != 'NONE':
self._init_compression()
self._file.write('FORM')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in ('ULAW', 'ALAW'):
self._datalength = self._datalength / 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == 'G722':
self._datalength = (self._datalength + 3) / 4
if self._datalength & 1:
self._datalength = self._datalength + 1
self._form_length_pos = self._file.tell()
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
self._file.write('FVER')
_write_long(self._file, 4)
_write_long(self._file, self._version)
else:
self._file.write('AIFF')
self._file.write('COMM')
_write_long(self._file, commlength)
_write_short(self._file, self._nchannels)
self._nframes_pos = self._file.tell()
_write_long(self._file, self._nframes)
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
self._ssnd_length_pos = self._file.tell()
_write_long(self._file, self._datalength + 8)
_write_long(self._file, 0)
_write_long(self._file, 0)
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_long(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(chr(0))
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_long(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_long(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write('MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_long(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_long(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
print "Reading", fn
print "nchannels =", f.getnchannels()
print "nframes =", f.getnframes()
print "sampwidth =", f.getsampwidth()
print "framerate =", f.getframerate()
print "comptype =", f.getcomptype()
print "compname =", f.getcompname()
if sys.argv[2:]:
gn = sys.argv[2]
print "Writing", gn
g = open(gn, 'w')
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
g.close()
f.close()
print "Done."

View File

@ -1,83 +0,0 @@
"""Generic interface to all dbm clones.
Instead of
import dbm
d = dbm.open(file, 'w', 0666)
use
import anydbm
d = anydbm.open(file, 'w')
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
dependent on the type of database being opened (determined by whichdb
module) in the case of an existing dbm. If the dbm does not exist and
the create or new flag ('c' or 'n') was specified, the dbm type will
be determined by the availability of the modules (tested in the above
order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, and may add interfaces to other dbm-like
implementations.
The open function has an optional second argument. This can be 'r',
for read-only access, 'w', for read-write access of an existing
database, 'c' for read-write access to a new or existing database, and
'n' for read-write access to a new database. The default is 'r'.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
"""
class error(Exception):
pass
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
_errors = [error]
_defaultmod = None
for _name in _names:
try:
_mod = __import__(_name)
except ImportError:
continue
if not _defaultmod:
_defaultmod = _mod
_errors.append(_mod.error)
if not _defaultmod:
raise ImportError, "no dbm clone found; tried %s" % _names
error = tuple(_errors)
def open(file, flag = 'r', mode = 0666):
# guess the type of an existing database
from whichdb import whichdb
    result = whichdb(file)
if result is None:
# db doesn't exist
if 'c' in flag or 'n' in flag:
# file doesn't exist and the new
# flag was used so use default type
mod = _defaultmod
else:
raise error, "need 'c' or 'n' flag to open new db"
elif result == "":
# db type cannot be determined
raise error, "db type could not be determined"
else:
mod = __import__(result)
return mod.open(file, flag, mode)
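A round trip through the interface sketched in the docstring; the database name is illustrative, and the 'c' flag creates it when it does not yet exist:

d = open('example_db', 'c')   # anydbm.open(...) from the caller's side
d['key'] = 'data'
assert d['key'] == 'data'
assert 'key' in d
del d['key']
d.close()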

View File

@ -1,301 +0,0 @@
# -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(expr, filename='<unknown>', mode='exec'):
"""
Parse an expression into an AST node.
Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST).
"""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
raise ValueError('malformed string')
return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
if 'lineno' in node._attributes:
node.lineno = getattr(node, 'lineno', 0) + n
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all child nodes of *node*, in no specified order. This is
useful if you only want to modify nodes in place and don't care about the
context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
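# Illustrative usage (an added sketch, not part of the original module):
#
#     import ast, collections
#     tree = ast.parse('a = b + 1')
#     kinds = collections.Counter(type(n).__name__ for n in ast.walk(tree))
#     # e.g. kinds['Name'] == 2 and kinds['BinOp'] == 1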
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
    By default the visitor function for a node is ``'visit_'`` + the
    class name of the node. So a `TryFinally` node visit function would
    be `visit_TryFinally`. This behavior can be changed by overriding
    the `visit` method. If no visitor function exists for a node,
    the `generic_visit` visitor is used instead.
    Don't use the `NodeVisitor` if you want to apply changes to nodes during
    traversal. For that, a special visitor exists (`NodeTransformer`) that
    allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
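The RewriteName transformer from the docstring above can be exercised end to
end. The following is a minimal added sketch, not part of the deleted file,
written against the same Python 2-era ast module; the names RewriteName and
tree are local to the sketch:

    import ast

    class RewriteName(ast.NodeTransformer):
        def visit_Name(self, node):
            # rewrite every plain name lookup into data['name']
            return ast.copy_location(ast.Subscript(
                value=ast.Name(id='data', ctx=ast.Load()),
                slice=ast.Index(value=ast.Str(s=node.id)),
                ctx=node.ctx), node)

    tree = RewriteName().visit(ast.parse('foo', mode='eval'))
    ast.fix_missing_locations(tree)
    print eval(compile(tree, '<ast>', 'eval'), {'data': {'foo': 42}})  # 42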


@ -1,295 +0,0 @@
# -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
# these are overridable defaults
ac_in_buffer_size = 4096
ac_out_buffer_size = 4096
def __init__ (self, conn=None):
self.ac_in_buffer = ''
self.ac_out_buffer = ''
self.producer_fifo = fifo()
asyncore.dispatcher.__init__ (self, conn)
def collect_incoming_data(self, data):
raise NotImplementedError, "must be implemented in subclass"
def found_terminator(self):
raise NotImplementedError, "must be implemented in subclass"
def set_terminator (self, term):
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
self.terminator = term
def get_terminator (self):
return self.terminator
# grab some more data from the socket,
# throw it to the collector method,
# check for the terminator,
# if found, transition to the next state.
def handle_read (self):
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
self.handle_error()
return
self.ac_in_buffer = self.ac_in_buffer + data
# Continue to search for self.terminator in self.ac_in_buffer,
# while calling self.collect_incoming_data. The while loop
# is necessary because we might read several data+terminator
# combos with a single recv(1024).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
if not terminator:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
elif isinstance(terminator, int) or isinstance(terminator, long):
# numeric terminator
n = terminator
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
self.terminator = self.terminator - lb
else:
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self.ac_in_buffer.find(terminator)
if index != -1:
# we found the terminator
if index > 0:
# don't bother reporting the empty string (source of subtle bugs)
self.collect_incoming_data (self.ac_in_buffer[:index])
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
else:
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
break
else:
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
def handle_write (self):
self.initiate_send ()
def handle_close (self):
self.close()
def push (self, data):
self.producer_fifo.push (simple_producer (data))
self.initiate_send()
def push_with_producer (self, producer):
self.producer_fifo.push (producer)
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
def writable (self):
"predicate for inclusion in the writable for select()"
# return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
# this is about twice as fast, though not as clear.
return not (
(self.ac_out_buffer == '') and
self.producer_fifo.is_empty() and
self.connected
)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.push (None)
# refill the outgoing buffer by calling the more() method
# of the first producer in the queue
def refill_buffer (self):
while 1:
if len(self.producer_fifo):
p = self.producer_fifo.first()
# a 'None' in the producer fifo is a sentinel,
# telling us to close the channel.
if p is None:
if not self.ac_out_buffer:
self.producer_fifo.pop()
self.close()
return
elif isinstance(p, str):
self.producer_fifo.pop()
self.ac_out_buffer = self.ac_out_buffer + p
return
data = p.more()
if data:
self.ac_out_buffer = self.ac_out_buffer + data
return
else:
self.producer_fifo.pop()
else:
return
def initiate_send (self):
obs = self.ac_out_buffer_size
# try to refill the buffer
if (len (self.ac_out_buffer) < obs):
self.refill_buffer()
if self.ac_out_buffer and self.connected:
# try to send the buffer
try:
num_sent = self.send (self.ac_out_buffer[:obs])
if num_sent:
self.ac_out_buffer = self.ac_out_buffer[num_sent:]
except socket.error, why:
self.handle_error()
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = ''
self.ac_out_buffer = ''
while self.producer_fifo:
self.producer_fifo.pop()
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
def more (self):
if len (self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = ''
return result
class fifo:
def __init__ (self, list=None):
if not list:
self.list = deque()
else:
self.list = deque(list)
def __len__ (self):
return len(self.list)
def is_empty (self):
return not self.list
def first (self):
return self.list[0]
def push (self, data):
self.list.append(data)
def pop (self):
if self.list:
return (1, self.list.popleft())
else:
return (0, None)
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
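As a usage illustration (an added sketch, not from the deleted file; the
LineEcho name is local to the sketch): a line-oriented handler only has to
supply the two abstract methods and manage the terminator.

    import asynchat

    class LineEcho(asynchat.async_chat):
        """Collect CRLF-terminated lines and echo each one back."""
        def __init__(self, conn):
            asynchat.async_chat.__init__(self, conn)
            self.buffer = []
            self.set_terminator('\r\n')        # single-line responses
        def collect_incoming_data(self, data):
            self.buffer.append(data)
        def found_terminator(self):
            line = ''.join(self.buffer)
            self.buffer = []
            self.push(line + '\r\n')           # queue the echo for sending

A complete server would create these handlers from an accepting asyncore
dispatcher, as sketched after the next file.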


@ -1,551 +0,0 @@
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def _exception (obj):
try:
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & (select.POLLIN | select.POLLPRI):
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=True, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = True
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
self._fileno = self.socket
self.add_channel()
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 1
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.addr = address
self.connected = True
self.handle_connect()
else:
raise socket.error, (err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = True
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = True
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = True
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
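To illustrate the dispatcher/loop model (an added sketch, not from the
deleted file; the class names and port number are arbitrary):

    import socket
    import asyncore

    class EchoHandler(asyncore.dispatcher_with_send):
        def handle_read(self):
            data = self.recv(4096)
            if data:
                self.send(data)            # buffered by dispatcher_with_send

    class EchoServer(asyncore.dispatcher):
        def __init__(self, port):
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind(('127.0.0.1', port))
            self.listen(5)
        def handle_accept(self):
            pair = self.accept()           # may be None on EWOULDBLOCK
            if pair is not None:
                EchoHandler(pair[0])

    EchoServer(8007)
    asyncore.loop()                        # run until all channels close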


@ -1,62 +0,0 @@
"""
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.
One public function, register, is defined.
"""
__all__ = ["register"]
import sys
_exithandlers = []
def _run_exitfuncs():
"""run any registered exit functions
_exithandlers is traversed in reverse order so functions are executed
last in, first out.
"""
exc_info = None
while _exithandlers:
func, targs, kargs = _exithandlers.pop()
try:
func(*targs, **kargs)
except SystemExit:
exc_info = sys.exc_info()
except:
import traceback
print >> sys.stderr, "Error in atexit._run_exitfuncs:"
traceback.print_exc()
exc_info = sys.exc_info()
if exc_info is not None:
raise exc_info[0], exc_info[1], exc_info[2]
def register(func, *targs, **kargs):
"""register a function to be executed upon normal program termination
func - function to be called at exit
targs - optional arguments to pass to func
kargs - optional keyword arguments to pass to func
"""
_exithandlers.append((func, targs, kargs))
if hasattr(sys, "exitfunc"):
# Assume it's another registered exit function - append it to our list
register(sys.exitfunc)
sys.exitfunc = _run_exitfuncs
if __name__ == "__main__":
def x1():
print "running x1"
def x2(n):
print "running x2(%r)" % (n,)
def x3(n, kwd=None):
print "running x3(%r, kwd=%r)" % (n, kwd)
register(x1)
register(x2, 12)
register(x3, 5, "bar")
register(x3, "no kwd args")


@ -1,359 +0,0 @@
#! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
"""Encode a string using Base64.
s is the string to encode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The encoded string is returned.
"""
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
if altchars is not None:
return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
return encoded
def b64decode(s, altchars=None):
"""Decode a Base64 encoded string.
s is the string to decode. Optional altchars must be a string of at least
length 2 (additional characters are ignored) which specifies the
alternative alphabet used instead of the '+' and '/' characters.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if altchars is not None:
s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
try:
return binascii.a2b_base64(s)
except binascii.Error, msg:
# Transform this exception for consistency
raise TypeError(msg)
def standard_b64encode(s):
"""Encode a string using the standard Base64 alphabet.
s is the string to encode. The encoded string is returned.
"""
return b64encode(s)
def standard_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
"""
return b64decode(s)
def urlsafe_b64encode(s):
"""Encode a string using a url-safe Base64 alphabet.
s is the string to encode. The encoded string is returned. The alphabet
uses '-' instead of '+' and '_' instead of '/'.
"""
return b64encode(s, '-_')
def urlsafe_b64decode(s):
"""Decode a string encoded with the standard Base64 alphabet.
s is the string to decode. The decoded string is returned. A TypeError
is raised if the string is incorrectly padded or if there are non-alphabet
characters present in the string.
The alphabet uses '-' instead of '+' and '_' instead of '/'.
"""
return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
"""Encode a string using Base32.
s is the string to encode. The encoded string is returned.
"""
parts = []
quanta, leftover = divmod(len(s), 5)
# Pad the last quantum with zero bits if necessary
if leftover:
s += ('\0' * (5 - leftover))
quanta += 1
for i in range(quanta):
# c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
# code is to process the 40 bits in units of 5 bits. So we take the 1
# leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
# bits of c2 and tack them onto c3. The shifts and masks are intended
# to give us values of exactly 5 bits in width.
c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
c2 += (c1 & 1) << 16 # 17 bits wide
c3 += (c2 & 3) << 8 # 10 bits wide
parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
_b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
_b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
_b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
_b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
_b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
_b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
_b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
])
encoded = EMPTYSTRING.join(parts)
# Adjust for any leftover partial quanta
if leftover == 1:
return encoded[:-6] + '======'
elif leftover == 2:
return encoded[:-4] + '===='
elif leftover == 3:
return encoded[:-3] + '==='
elif leftover == 4:
return encoded[:-1] + '='
return encoded
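# Worked example (added for illustration, not part of the original source):
# the single quantum 'hello' unpacks as c1=0x6865, c2=0x6c6c, c3=0x6f.
# After folding the leftover bits as above, the eight 5-bit groups are
# 13, 1, 18, 22, 24, 27, 3, 15, which index the alphabet to 'NBSWY3DP' --
# the same string b32encode('hello') returns.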
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el). The optional argument map01, when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = _translate(s, {'0': 'O', '1': map01})
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise TypeError('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
"""Encode a string using Base16.
s is the string to encode. The encoded string is returned.
"""
return binascii.hexlify(s).upper()
def b16decode(s, casefold=False):
"""Decode a Base16 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
if casefold:
s = s.upper()
if re.search('[^0-9A-F]', s):
raise TypeError('Non-base16 digit found')
return binascii.unhexlify(s)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3
def encode(input, output):
"""Encode a file."""
while True:
s = input.read(MAXBINSIZE)
if not s:
break
while len(s) < MAXBINSIZE:
ns = input.read(MAXBINSIZE-len(s))
if not ns:
break
s += ns
line = binascii.b2a_base64(s)
output.write(line)
def decode(input, output):
"""Decode a file."""
while True:
line = input.readline()
if not line:
break
s = binascii.a2b_base64(line)
output.write(s)
def encodestring(s):
"""Encode a string."""
pieces = []
for i in range(0, len(s), MAXBINSIZE):
chunk = s[i : i + MAXBINSIZE]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
def decodestring(s):
"""Decode a string."""
return binascii.a2b_base64(s)
# Usable as a script...
def test():
"""Small test program"""
import sys, getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'deut')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print """usage: %s [-d|-e|-u|-t] [file|-]
-d, -u: decode
-e: encode (default)
-t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
sys.exit(2)
func = encode
for o, a in opts:
if o == '-e': func = encode
if o == '-d': func = decode
if o == '-u': func = decode
if o == '-t': test1(); return
if args and args[0] != '-':
func(open(args[0], 'rb'), sys.stdout)
else:
func(sys.stdin, sys.stdout)
def test1():
s0 = "Aladdin:open sesame"
s1 = encodestring(s0)
s2 = decodestring(s1)
print s0, repr(s1), s2
if __name__ == '__main__':
test()
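For reference, a short usage sketch (added; not part of the deleted file),
showing the standard and URL-safe alphabets on Python 2-style byte strings:

    import base64

    data = 'binary\x00payload'
    enc = base64.b64encode(data)           # 'YmluYXJ5AHBheWxvYWQ='
    assert base64.b64decode(enc) == data

    token = base64.urlsafe_b64encode('\xfb\xff')
    assert token == '-_8='                 # '-' and '_' replace '+' and '/'
    assert base64.urlsafe_b64decode(token) == '\xfb\xff'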


@ -1,613 +0,0 @@
"""Debugger basics"""
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
"""Exception to give up completely"""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self):
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self.stopframe = None
self.returnframe = None
self.quitting = 0
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if frame is self.stopframe:
return True
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not filename in self.breaks:
return False
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if not lineno in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.breaks.has_key(
self.canonic(frame.f_code.co_filename))
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_step(self):
"""Stop after one line of code."""
self.stopframe = None
self.returnframe = None
self.quitting = 0
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self.stopframe = frame
self.returnframe = None
self.quitting = 0
def set_return(self, frame):
"""Stop when returning from the given frame."""
self.stopframe = frame.f_back
self.returnframe = frame
self.quitting = 0
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 0
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
    # error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
if not Breakpoint.bplist.has_key((filename, lineno)):
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
self.clear_break(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if self.bplist.has_key((file, line)):
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self, out=None):
if out is None:
out = sys.stdout
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print >>out, '\tstop only if %s' % (self.cond,)
if self.ignore:
print >>out, '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print >>out, ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns a (breakpoint, flag) pair, or (None, None) if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
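A minimal client of the base class (an added sketch, not from the deleted
file; LineTracer and target are names local to the sketch): override one
user_* hook and drive a call with runcall().

    import bdb

    class LineTracer(bdb.Bdb):
        def user_line(self, frame):
            # called at every traced line; a real debugger would prompt here
            print '+ %s:%d' % (frame.f_code.co_filename, frame.f_lineno)
            self.set_step()                # keep single-stepping

    def target():
        x = 1
        return x + 1

    LineTracer().runcall(target)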


@ -1,527 +0,0 @@
"""Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, (name, finfo, dlen, rlen), ofp):
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
if os.name == 'mac':
fss = FSSpec(ofname)
fss.SetCreatorType('BnHq', 'TEXT')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
            raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
# The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
if os.name == 'mac':
ofss = FSSpec(out)
out = ofss.as_pathname()
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
if os.name == 'mac':
nfinfo = ofss.GetFInfo()
nfinfo.Creator = finfo.Creator
nfinfo.Type = finfo.Type
nfinfo.Flags = finfo.Flags
ofss.SetFInfo(nfinfo)
ifp.close()
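# Round-trip sketch (added for illustration; file names are hypothetical):
# binhex() encodes an existing file to BinHex 4.0 and hexbin(), defined
# above, decodes it back.
def _roundtrip_example(path):
    binhex(path, path + '.hqx')
    hexbin(path + '.hqx', path + '.roundtrip')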
def _test():
if os.name == 'mac':
import macfs
fss, ok = macfs.PromptGetFile('File to convert:')
if not ok:
sys.exit(0)
fname = fss.as_pathname()
else:
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()

View File

@ -1,84 +0,0 @@
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect
except ImportError:
pass
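# Usage sketch (illustrative values): with duplicates present, bisect_right
# returns the insertion point after the existing entries and bisect_left the
# point before them; insort keeps the list sorted in place.
def _bisect_example():
    grades = [60, 70, 80, 90]
    assert bisect_right(grades, 80) == 3   # after the existing 80
    assert bisect_left(grades, 80) == 2    # before the existing 80
    insort(grades, 75)
    assert grades == [60, 70, 75, 80, 90]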

View File

@ -1,705 +0,0 @@
"""Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
from __future__ import with_statement
import sys
import datetime
import locale as _locale
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
def __init__(self, month):
self.month = month
def __str__(self):
return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
def __init__(self, weekday):
self.weekday = weekday
def __str__(self):
return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in xrange(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in xrange(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
"""Return 1 for leap years, 0 for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def leapdays(y1, y2):
"""Return number of leap years in range [y1, y2).
Assume y1 <= y2."""
y1 -= 1
y2 -= 1
return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
def weekday(year, month, day):
"""Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
day (1-31)."""
return datetime.date(year, month, day).weekday()
def monthrange(year, month):
"""Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
year, month."""
if not 1 <= month <= 12:
raise IllegalMonthError(month)
day1 = weekday(year, month, 1)
ndays = mdays[month] + (month == February and isleap(year))
return day1, ndays
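# Worked example (added for illustration): the helpers above encode the
# Gregorian rules -- 2000 is a leap year, 1900 is not, and February 2001
# starts on a Thursday (weekday 3) with 28 days.
def _calendar_helpers_example():
    assert isleap(2000) and not isleap(1900)
    assert leapdays(1900, 2000) == 24      # leap years in [1900, 2000)
    assert monthrange(2001, 2) == (3, 28)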
class Calendar(object):
"""
Base calendar class. This class doesn't do any formatting. It simply
provides data to subclasses.
"""
def __init__(self, firstweekday=0):
self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
def getfirstweekday(self):
return self._firstweekday % 7
def setfirstweekday(self, firstweekday):
self._firstweekday = firstweekday
firstweekday = property(getfirstweekday, setfirstweekday)
def iterweekdays(self):
"""
Return an iterator for one week of weekday numbers starting with the
configured first one.
"""
for i in xrange(self.firstweekday, self.firstweekday + 7):
yield i%7
def itermonthdates(self, year, month):
"""
Return an iterator for one month. The iterator will yield datetime.date
values and will always iterate through complete weeks, so it will yield
dates outside the specified month.
"""
date = datetime.date(year, month, 1)
# Go back to the beginning of the week
days = (date.weekday() - self.firstweekday) % 7
date -= datetime.timedelta(days=days)
oneday = datetime.timedelta(days=1)
while True:
yield date
date += oneday
if date.month != month and date.weekday() == self.firstweekday:
break
def itermonthdays2(self, year, month):
"""
Like itermonthdates(), but will yield (day number, weekday number)
tuples. For days outside the specified month the day number is 0.
"""
for date in self.itermonthdates(year, month):
if date.month != month:
yield (0, date.weekday())
else:
yield (date.day, date.weekday())
def itermonthdays(self, year, month):
"""
Like itermonthdates(), but will yield day numbers. For days outside
the specified month the day number is 0.
"""
for date in self.itermonthdates(year, month):
if date.month != month:
yield 0
else:
yield date.day
def monthdatescalendar(self, year, month):
"""
Return a matrix (list of lists) representing a month's calendar.
Each row represents a week; week entries are datetime.date values.
"""
dates = list(self.itermonthdates(year, month))
return [ dates[i:i+7] for i in xrange(0, len(dates), 7) ]
def monthdays2calendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; week entries are
(day number, weekday number) tuples. Day numbers outside this month
are zero.
"""
days = list(self.itermonthdays2(year, month))
return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
def monthdayscalendar(self, year, month):
"""
Return a matrix representing a month's calendar.
Each row represents a week; days outside this month are zero.
"""
days = list(self.itermonthdays(year, month))
return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
months = [
self.monthdatescalendar(year, i)
for i in xrange(January, January+12)
]
return [months[i:i+width] for i in xrange(0, len(months), width) ]
def yeardays2calendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are
(day number, weekday number) tuples. Day numbers outside this month are
zero.
"""
months = [
self.monthdays2calendar(year, i)
for i in xrange(January, January+12)
]
return [months[i:i+width] for i in xrange(0, len(months), width) ]
def yeardayscalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting (similar to
yeardatescalendar()). Entries in the week lists are day numbers.
Day numbers outside this month are zero.
"""
months = [
self.monthdayscalendar(year, i)
for i in xrange(January, January+12)
]
return [months[i:i+width] for i in xrange(0, len(months), width) ]
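# Usage sketch: the week matrix for February 2001 with the default Monday
# first weekday; zeros mark days that fall outside the month.
def _monthdayscalendar_example():
    weeks = Calendar().monthdayscalendar(2001, 2)
    assert weeks[0] == [0, 0, 0, 1, 2, 3, 4]        # Feb 1, 2001: Thursday
    assert weeks[-1] == [26, 27, 28, 0, 0, 0, 0]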
class TextCalendar(Calendar):
"""
Subclass of Calendar that outputs a calendar as a simple plain text
similar to the UNIX program cal.
"""
def prweek(self, theweek, width):
"""
Print a single week (no newline).
"""
print self.formatweek(theweek, width),
def formatday(self, day, weekday, width):
"""
Returns a formatted day.
"""
if day == 0:
s = ''
else:
s = '%2i' % day # right-align single-digit days
return s.center(width)
def formatweek(self, theweek, width):
"""
Returns a single week in a string (no newline).
"""
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
def formatweekday(self, day, width):
"""
Returns a formatted week day name.
"""
if width >= 9:
names = day_name
else:
names = day_abbr
return names[day][:width].center(width)
def formatweekheader(self, width):
"""
Return a header for a week.
"""
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
def formatmonthname(self, theyear, themonth, width, withyear=True):
"""
Return a formatted month name.
"""
s = month_name[themonth]
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
def prmonth(self, theyear, themonth, w=0, l=0):
"""
Print a month's calendar.
"""
print self.formatmonth(theyear, themonth, w, l),
def formatmonth(self, theyear, themonth, w=0, l=0):
"""
Return a month's calendar string (multi-line).
"""
w = max(2, w)
l = max(1, l)
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
s = s.rstrip()
s += '\n' * l
s += self.formatweekheader(w).rstrip()
s += '\n' * l
for week in self.monthdays2calendar(theyear, themonth):
s += self.formatweek(week, w).rstrip()
s += '\n' * l
return s
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
"""
Returns a year's calendar as a multi-line string.
"""
w = max(2, w)
l = max(1, l)
c = max(2, c)
colwidth = (w + 1) * 7 - 1
v = []
a = v.append
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
a('\n'*l)
header = self.formatweekheader(w)
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
# months in this row
months = xrange(m*i+1, min(m*(i+1)+1, 13))
a('\n'*l)
names = (self.formatmonthname(theyear, k, colwidth, False)
for k in months)
a(formatstring(names, colwidth, c).rstrip())
a('\n'*l)
headers = (header for k in months)
a(formatstring(headers, colwidth, c).rstrip())
a('\n'*l)
# max number of weeks for this row
height = max(len(cal) for cal in row)
for j in xrange(height):
weeks = []
for cal in row:
if j >= len(cal):
weeks.append('')
else:
weeks.append(self.formatweek(cal[j], w))
a(formatstring(weeks, colwidth, c).rstrip())
a('\n' * l)
return ''.join(v)
def pryear(self, theyear, w=0, l=0, c=6, m=3):
"""Print a year's calendar."""
print self.formatyear(theyear, w, l, c, m)
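# Usage sketch: render a single month as plain text, much like cal(1).
def _textcalendar_example():
    return TextCalendar().formatmonth(2001, 2)   # multi-line string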
class HTMLCalendar(Calendar):
"""
This calendar returns complete HTML pages.
"""
# CSS classes for the day <td>s
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
def formatday(self, day, weekday):
"""
Return a day as a table cell.
"""
if day == 0:
return '<td class="noday">&nbsp;</td>' # day outside month
else:
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
def formatweek(self, theweek):
"""
Return a complete week as a table row.
"""
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
return '<tr>%s</tr>' % s
def formatweekday(self, day):
"""
Return a weekday name as a table header.
"""
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
def formatweekheader(self):
"""
Return a header for a week as a table row.
"""
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
return '<tr>%s</tr>' % s
def formatmonthname(self, theyear, themonth, withyear=True):
"""
Return a month name as a table row.
"""
if withyear:
s = '%s %s' % (month_name[themonth], theyear)
else:
s = '%s' % month_name[themonth]
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
def formatmonth(self, theyear, themonth, withyear=True):
"""
Return a formatted month as a table.
"""
v = []
a = v.append
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week))
a('\n')
a('</table>')
a('\n')
return ''.join(v)
def formatyear(self, theyear, width=3):
"""
Return a formatted year as a table of tables.
"""
v = []
a = v.append
width = max(width, 1)
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
a('\n')
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
for i in xrange(January, January+12, width):
# months in this row
months = xrange(i, min(i+width, 13))
a('<tr>')
for m in months:
a('<td>')
a(self.formatmonth(theyear, m, withyear=False))
a('</td>')
a('</tr>')
a('</table>')
return ''.join(v)
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
"""
Return a formatted year as a complete HTML page.
"""
if encoding is None:
encoding = sys.getdefaultencoding()
v = []
a = v.append
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
a('<html>\n')
a('<head>\n')
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
if css is not None:
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
a('<title>Calendar for %d</title>\n' % theyear)
a('</head>\n')
a('<body>\n')
a(self.formatyear(theyear, width))
a('</body>\n')
a('</html>\n')
return ''.join(v).encode(encoding, "xmlcharrefreplace")
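# Usage sketch (output file name is hypothetical): write a self-contained
# XHTML page for one year, three months per row.
def _htmlcalendar_example():
    page = HTMLCalendar().formatyearpage(2001, width=3, css='calendar.css')
    open('calendar-2001.html', 'wb').write(page)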
class TimeEncoding:
def __init__(self, locale):
self.locale = locale
def __enter__(self):
self.oldlocale = _locale.setlocale(_locale.LC_TIME, self.locale)
return _locale.getlocale(_locale.LC_TIME)[1]
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
TextCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day, width):
with TimeEncoding(self.locale) as encoding:
if width >= 9:
names = day_name
else:
names = day_abbr
name = names[day]
if encoding is not None:
name = name.decode(encoding)
return name[:width].center(width)
def formatmonthname(self, theyear, themonth, width, withyear=True):
with TimeEncoding(self.locale) as encoding:
s = month_name[themonth]
if encoding is not None:
s = s.decode(encoding)
if withyear:
s = "%s %r" % (s, theyear)
return s.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
"""
This class can be passed a locale name in the constructor and will return
month and weekday names in the specified locale. If this locale includes
an encoding all strings containing month and weekday names will be returned
as unicode.
"""
def __init__(self, firstweekday=0, locale=None):
HTMLCalendar.__init__(self, firstweekday)
if locale is None:
locale = _locale.getdefaultlocale()
self.locale = locale
def formatweekday(self, day):
with TimeEncoding(self.locale) as encoding:
s = day_abbr[day]
if encoding is not None:
s = s.decode(encoding)
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
def formatmonthname(self, theyear, themonth, withyear=True):
with TimeEncoding(self.locale) as encoding:
s = month_name[themonth]
if encoding is not None:
s = s.decode(encoding)
if withyear:
s = '%s %s' % (s, theyear)
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
# Support for old module level interface
c = TextCalendar()
firstweekday = c.getfirstweekday
def setfirstweekday(firstweekday):
if not MONDAY <= firstweekday <= SUNDAY:
raise IllegalWeekdayError(firstweekday)
c.firstweekday = firstweekday
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1 # Amount printed by prweek()
_spacing = 6 # Number of spaces between columns
def format(cols, colwidth=_colwidth, spacing=_spacing):
"""Prints multi-column formatting for year calendars"""
print formatstring(cols, colwidth, spacing)
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
"""Returns a string formatted from n strings, centered within n columns."""
spacing *= ' '
return spacing.join(c.center(colwidth) for c in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
"""Unrelated but handy function to calculate Unix timestamp from GMT."""
year, month, day, hour, minute, second = tuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
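# Worked example: the epoch maps to 0 and one day later is 86400 seconds.
def _timegm_example():
    assert timegm((1970, 1, 1, 0, 0, 0)) == 0
    assert timegm((1970, 1, 2, 0, 0, 0)) == 86400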
def main(args):
import optparse
parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
parser.add_option(
"-w", "--width",
dest="width", type="int", default=2,
help="width of date column (default 2, text only)"
)
parser.add_option(
"-l", "--lines",
dest="lines", type="int", default=1,
help="number of lines for each week (default 1, text only)"
)
parser.add_option(
"-s", "--spacing",
dest="spacing", type="int", default=6,
help="spacing between months (default 6, text only)"
)
parser.add_option(
"-m", "--months",
dest="months", type="int", default=3,
help="months per row (default 3, text only)"
)
parser.add_option(
"-c", "--css",
dest="css", default="calendar.css",
help="CSS to use for page (html only)"
)
parser.add_option(
"-L", "--locale",
dest="locale", default=None,
help="locale to be used from month and weekday names"
)
parser.add_option(
"-e", "--encoding",
dest="encoding", default=None,
help="Encoding to use for output"
)
parser.add_option(
"-t", "--type",
dest="type", default="text",
choices=("text", "html"),
help="output type (text or html)"
)
(options, args) = parser.parse_args(args)
if options.locale and not options.encoding:
parser.error("if --locale is specified --encoding is required")
sys.exit(1)
locale = options.locale, options.encoding
if options.type == "html":
if options.locale:
cal = LocaleHTMLCalendar(locale=locale)
else:
cal = HTMLCalendar()
encoding = options.encoding
if encoding is None:
encoding = sys.getdefaultencoding()
optdict = dict(encoding=encoding, css=options.css)
if len(args) == 1:
print cal.formatyearpage(datetime.date.today().year, **optdict)
elif len(args) == 2:
print cal.formatyearpage(int(args[1]), **optdict)
else:
parser.error("incorrect number of arguments")
sys.exit(1)
else:
if options.locale:
cal = LocaleTextCalendar(locale=locale)
else:
cal = TextCalendar()
optdict = dict(w=options.width, l=options.lines)
if len(args) != 3:
optdict["c"] = options.spacing
optdict["m"] = options.months
if len(args) == 1:
result = cal.formatyear(datetime.date.today().year, **optdict)
elif len(args) == 2:
result = cal.formatyear(int(args[1]), **optdict)
elif len(args) == 3:
result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
else:
parser.error("incorrect number of arguments")
sys.exit(1)
if options.encoding:
result = result.encode(options.encoding)
print result
if __name__ == "__main__":
main(sys.argv)

File diff suppressed because it is too large

View File

@ -1,318 +0,0 @@
"""More comprehensive traceback formatting for Python scripts.
To enable this module, do:
import cgitb; cgitb.enable()
at the top of your script. The optional arguments to enable() are:
display - if true, tracebacks are displayed in the web browser
logdir - if set, tracebacks are written to files in this directory
context - number of lines of source code to show for each stack frame
format - 'text' or 'html' controls the output format
By default, tracebacks are displayed but not saved, the context is 5 lines
and the output format is 'html' (for backwards compatibility with the
original use of this module)
Alternatively, if you have caught an exception and want cgitb to display it
for you, call cgitb.handler(). The optional argument to handler() is a
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
The default handler displays output as HTML.
"""
__author__ = 'Ka-Ping Yee'
__version__ = '$Revision: 55349 $'
import sys
def reset():
"""Return a string that resets the CGI and browser to a known state."""
return '''<!--: spam
Content-Type: text/html
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
</font> </font> </font> </script> </object> </blockquote> </pre>
</table> </table> </table> </table> </table> </font> </font> </font>'''
__UNDEF__ = [] # a special sentinel object
def small(text):
if text:
return '<small>' + text + '</small>'
else:
return ''
def strong(text):
if text:
return '<strong>' + text + '</strong>'
else:
return ''
def grey(text):
if text:
return '<font color="#909090">' + text + '</font>'
else:
return ''
def lookup(name, frame, locals):
"""Find the value for a given name in the given environment."""
if name in locals:
return 'local', locals[name]
if name in frame.f_globals:
return 'global', frame.f_globals[name]
if '__builtins__' in frame.f_globals:
builtins = frame.f_globals['__builtins__']
if type(builtins) is type({}):
if name in builtins:
return 'builtin', builtins[name]
else:
if hasattr(builtins, name):
return 'builtin', getattr(builtins, name)
return None, __UNDEF__
def scanvars(reader, frame, locals):
"""Scan one logical line of Python and look up values of variables used."""
import tokenize, keyword
vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
for ttype, token, start, end, line in tokenize.generate_tokens(reader):
if ttype == tokenize.NEWLINE: break
if ttype == tokenize.NAME and token not in keyword.kwlist:
if lasttoken == '.':
if parent is not __UNDEF__:
value = getattr(parent, token, __UNDEF__)
vars.append((prefix + token, prefix, value))
else:
where, value = lookup(token, frame, locals)
vars.append((token, where, value))
elif token == '.':
prefix += lasttoken + '.'
parent = value
else:
parent, prefix = None, ''
lasttoken = token
return vars
def html((etype, evalue, etb), context=5):
"""Return a nice HTML document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
'<big><big>%s</big></big>' %
strong(pydoc.html.escape(str(etype))),
'#ffffff', '#6622aa', pyver + '<br>' + date) + '''
<p>A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.</p>'''
indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
if file:
file = os.path.abspath(file)
link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
else:
file = link = '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + strong(func) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
('<big>&nbsp;</big>', link, call)]
if index is not None:
i = lnum - index
for line in lines:
num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
if i in highlight:
rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
else:
rows.append('<tr><td>%s</td></tr>' % grey(line))
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where in ('global', 'builtin'):
name = ('<em>%s</em> ' % where) + strong(name)
elif where == 'local':
name = strong(name)
else:
name = where + strong(name.split('.')[-1])
dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
else:
dump.append(name + ' <em>undefined</em>')
rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
frames.append('''
<table width="100%%" cellspacing=0 cellpadding=0 border=0>
%s</table>''' % '\n'.join(rows))
exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
pydoc.html.escape(str(evalue)))]
if isinstance(evalue, BaseException):
for name in dir(evalue):
if name[:1] == '_': continue
value = pydoc.html.repr(getattr(evalue, name))
exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
<!-- The above is a description of an error in a Python program, formatted
for a Web browser because the 'cgitb' module was enabled. In case you
are not reading this in a Web browser, here is the original traceback:
%s
-->
''' % pydoc.html.escape(
''.join(traceback.format_exception(etype, evalue, etb)))
def text((etype, evalue, etb), context=5):
"""Return a plain text document describing a given traceback."""
import os, types, time, traceback, linecache, inspect, pydoc
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + func + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = [' %s %s' % (file, call)]
if index is not None:
i = lnum - index
for line in lines:
num = '%5d ' % i
rows.append(num+line.rstrip())
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where == 'global': name = 'global ' + name
elif where != 'local': name = where + name.split('.')[-1]
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
else:
dump.append(name + ' undefined')
rows.append('\n'.join(dump))
frames.append('\n%s\n' % '\n'.join(rows))
exception = ['%s: %s' % (str(etype), str(evalue))]
if isinstance(evalue, BaseException):
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (" "*4, name, value))
import traceback
return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
class Hook:
"""A hook to replace sys.excepthook that shows tracebacks in HTML."""
def __init__(self, display=1, logdir=None, context=5, file=None,
format="html"):
self.display = display # send tracebacks to browser if true
self.logdir = logdir # log tracebacks to files if not None
self.context = context # number of source code lines per frame
self.file = file or sys.stdout # place to send the output
self.format = format
def __call__(self, etype, evalue, etb):
self.handle((etype, evalue, etb))
def handle(self, info=None):
info = info or sys.exc_info()
if self.format == "html":
self.file.write(reset())
formatter = (self.format=="html") and html or text
plain = False
try:
doc = formatter(info, self.context)
except: # just in case something goes wrong
import traceback
doc = ''.join(traceback.format_exception(*info))
plain = True
if self.display:
if plain:
doc = doc.replace('&', '&amp;').replace('<', '&lt;')
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
else:
self.file.write('<p>A problem occurred in a Python script.\n')
if self.logdir is not None:
import os, tempfile
suffix = ['.txt', '.html'][self.format=="html"]
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
try:
file = os.fdopen(fd, 'w')
file.write(doc)
file.close()
msg = '<p> %s contains the description of this error.' % path
except:
msg = '<p> Tried to save traceback to %s, but failed.' % path
self.file.write(msg + '\n')
try:
self.file.flush()
except: pass
handler = Hook().handle
def enable(display=1, logdir=None, context=5, format="html"):
"""Install an exception handler that formats tracebacks as HTML.
The optional argument 'display' can be set to 0 to suppress sending the
traceback to the browser, and 'logdir' can be set to a directory to cause
tracebacks to be written to files there."""
sys.excepthook = Hook(display=display, logdir=logdir,
context=context, format=format)
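# Usage sketch (log directory is hypothetical): show HTML tracebacks in the
# browser and also save a copy of each one under /tmp/cgitb.
def _cgitb_example():
    enable(display=1, logdir='/tmp/cgitb', context=5, format='html')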

View File

@ -1,167 +0,0 @@
"""Simple class to read IFF chunks.
An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
Format)) has the following structure:
+----------------+
| ID (4 bytes) |
+----------------+
| size (4 bytes) |
+----------------+
| data |
| ... |
+----------------+
The ID is a 4-byte string which identifies the type of chunk.
The size field (a 32-bit value, encoded using big-endian byte order)
gives the size of the whole chunk, including the 8-byte header.
Usually an IFF-type file consists of one or more chunks. The proposed
usage of the Chunk class defined here is to instantiate an instance at
the start of each chunk and read from the instance until it reaches
the end, after which a new instance can be instantiated. At the end
of the file, creating a new instance will fail with an EOFError
exception.
Usage:
while True:
try:
chunk = Chunk(file)
except EOFError:
break
chunktype = chunk.getname()
while True:
data = chunk.read(nbytes)
if not data:
break
# do something with data
The interface is file-like. The implemented methods are:
read, close, seek, tell, isatty.
Extra methods are: skip() (called by close, skips to the end of the chunk),
getname() (returns the name (ID) of the chunk)
The __init__ method has one required argument, a file-like object
(including a chunk instance), and one optional argument, a flag which
specifies whether or not chunks are aligned on 2-byte boundaries. The
default is 1, i.e. aligned.
"""
class Chunk:
def __init__(self, file, align=True, bigendian=True, inclheader=False):
import struct
self.closed = False
self.align = align # whether to align to word (2-byte) boundaries
if bigendian:
strflag = '>'
else:
strflag = '<'
self.file = file
self.chunkname = file.read(4)
if len(self.chunkname) < 4:
raise EOFError
try:
self.chunksize = struct.unpack(strflag+'L', file.read(4))[0]
except struct.error:
raise EOFError
if inclheader:
self.chunksize = self.chunksize - 8 # subtract header
self.size_read = 0
try:
self.offset = self.file.tell()
except (AttributeError, IOError):
self.seekable = False
else:
self.seekable = True
def getname(self):
"""Return the name (ID) of the current chunk."""
return self.chunkname
def getsize(self):
"""Return the size of the current chunk."""
return self.chunksize
def close(self):
if not self.closed:
self.skip()
self.closed = True
def isatty(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return False
def seek(self, pos, whence=0):
"""Seek to specified position into the chunk.
Default position is 0 (start of chunk).
If the file is not seekable, this will result in an error.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if not self.seekable:
raise IOError, "cannot seek"
if whence == 1:
pos = pos + self.size_read
elif whence == 2:
pos = pos + self.chunksize
if pos < 0 or pos > self.chunksize:
raise RuntimeError
self.file.seek(self.offset + pos, 0)
self.size_read = pos
def tell(self):
if self.closed:
raise ValueError, "I/O operation on closed file"
return self.size_read
def read(self, size=-1):
"""Read at most size bytes from the chunk.
If size is omitted or negative, read until the end
of the chunk.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.size_read >= self.chunksize:
return ''
if size < 0:
size = self.chunksize - self.size_read
if size > self.chunksize - self.size_read:
size = self.chunksize - self.size_read
data = self.file.read(size)
self.size_read = self.size_read + len(data)
if self.size_read == self.chunksize and \
self.align and \
(self.chunksize & 1):
dummy = self.file.read(1)
self.size_read = self.size_read + len(dummy)
return data
def skip(self):
"""Skip the rest of the chunk.
If you are not interested in the contents of the chunk,
this method should be called so that the file points to
the start of the next chunk.
"""
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.seekable:
try:
n = self.chunksize - self.size_read
# maybe fix alignment
if self.align and (self.chunksize & 1):
n = n + 1
self.file.seek(n, 1)
self.size_read = self.size_read + n
return
except IOError:
pass
while self.size_read < self.chunksize:
n = min(8192, self.chunksize - self.size_read)
dummy = self.read(n)
if not dummy:
raise EOFError
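# Usage sketch (file name is hypothetical): walk the top-level chunks of an
# IFF-style file, printing each 4-byte ID and payload size.
def _chunk_example(path):
    f = open(path, 'rb')
    while True:
        try:
            ck = Chunk(f)
        except EOFError:
            break
        print ck.getname(), ck.getsize()
        ck.close()    # close() skips to the start of the next chunk
    f.close()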

View File

@ -1,405 +0,0 @@
"""A generic class to build line-oriented command interpreters.
Interpreters constructed with this class obey the following conventions:
1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command. (Actually, it calls the
method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method. Given an argument `topic', it
calls the command `help_topic'. With no arguments, it lists all topics
with defined help_ functions, broken into up to three topics; documented
commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'. The command '!' is a synonym
for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
and completing of commands args is done by calling complete_foo() with
arguments text, line, begidx, endidx. text is the string we are matching
against; all returned matches must begin with it. line is the current
input line (lstripped), begidx and endidx are the beginning and end
indexes of the text being matched, which could be used to provide
different completion depending upon which position the argument is in.
The `default' method may be overridden to intercept commands for which there
is no do_ method.
The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.
The data member `self.ruler' sets the character used to draw separator lines
in the help messages. If empty, no ruler line is drawn. It defaults to "=".
If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup. This value may be overridden
via an optional argument to the cmdloop() method.
The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.
These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""
import string
__all__ = ["Cmd"]
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
class Cmd:
"""A simple framework for writing line-oriented command interpreters.
These are often useful for test harnesses, administrative tools, and
prototypes that will later be wrapped in a more sophisticated interface.
A Cmd instance or subclass instance is a line-oriented interpreter
framework. There is no good reason to instantiate Cmd itself; rather,
it's useful as a superclass of an interpreter class you define yourself
in order to inherit Cmd's methods and encapsulate action methods.
"""
prompt = PROMPT
identchars = IDENTCHARS
ruler = '='
lastcmd = ''
intro = None
doc_leader = ""
doc_header = "Documented commands (type help <topic>):"
misc_header = "Miscellaneous help topics:"
undoc_header = "Undocumented commands:"
nohelp = "*** No help on %s"
use_rawinput = 1
def __init__(self, completekey='tab', stdin=None, stdout=None):
"""Instantiate a line-oriented interpreter framework.
The optional argument 'completekey' is the readline name of a
completion key; it defaults to the Tab key. If completekey is
not None and the readline module is available, command completion
is done automatically. The optional arguments stdin and stdout
specify alternate input and output file objects; if not specified,
sys.stdin and sys.stdout are used.
"""
import sys
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey+": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
line = 'EOF'
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = 'EOF'
else:
line = line[:-1] # chop \n
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
def precmd(self, line):
"""Hook method executed just before the command line is
interpreted, but after the input prompt is generated and issued.
"""
return line
def postcmd(self, stop, line):
"""Hook method executed just after a command dispatch is finished."""
return stop
def preloop(self):
"""Hook method executed once when the cmdloop() method is called."""
pass
def postloop(self):
"""Hook method executed once when the cmdloop() method is about to
return.
"""
pass
def parseline(self, line):
"""Parse the line into a command name and a string containing
the arguments. Returns a tuple containing (command, args, line).
'command' and 'args' may be None if the line couldn't be parsed.
"""
line = line.strip()
if not line:
return None, None, line
elif line[0] == '?':
line = 'help ' + line[1:]
elif line[0] == '!':
if hasattr(self, 'do_shell'):
line = 'shell ' + line[1:]
else:
return None, None, line
i, n = 0, len(line)
while i < n and line[i] in self.identchars: i = i+1
cmd, arg = line[:i], line[i:].strip()
return cmd, arg, line
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
This may be overridden, but should not normally need to be;
see the precmd() and postcmd() methods for useful execution hooks.
The return value is a flag indicating whether interpretation of
commands by the interpreter should stop.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def emptyline(self):
"""Called when an empty line is entered in response to the prompt.
If this method is not overridden, it repeats the last nonempty
command entered.
"""
if self.lastcmd:
return self.onecmd(self.lastcmd)
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
If this method is not overridden, it prints an error message and
returns.
"""
self.stdout.write('*** Unknown syntax: %s\n'%line)
def completedefault(self, *ignored):
"""Method called to complete an input line when no command-specific
complete_*() method is available.
By default, it returns an empty list.
"""
return []
def completenames(self, text, *ignored):
dotext = 'do_'+text
return [a[3:] for a in self.get_names() if a.startswith(dotext)]
def complete(self, text, state):
"""Return the next possible completion for 'text'.
If a command has not been entered, then complete against command list.
Otherwise try to call complete_<command> to get list of completions.
"""
if state == 0:
import readline
origline = readline.get_line_buffer()
line = origline.lstrip()
stripped = len(origline) - len(line)
begidx = readline.get_begidx() - stripped
endidx = readline.get_endidx() - stripped
if begidx>0:
cmd, args, foo = self.parseline(line)
if cmd == '':
compfunc = self.completedefault
else:
try:
compfunc = getattr(self, 'complete_' + cmd)
except AttributeError:
compfunc = self.completedefault
else:
compfunc = self.completenames
self.completion_matches = compfunc(text, line, begidx, endidx)
try:
return self.completion_matches[state]
except IndexError:
return None
def get_names(self):
# Inheritance says we have to look in class and
# base classes; order is not important.
names = []
classes = [self.__class__]
while classes:
aclass = classes.pop(0)
if aclass.__bases__:
classes = classes + list(aclass.__bases__)
names = names + dir(aclass)
return names
def complete_help(self, *args):
return self.completenames(*args)
def do_help(self, arg):
if arg:
# XXX check arg syntax
try:
func = getattr(self, 'help_' + arg)
except AttributeError:
try:
doc=getattr(self, 'do_' + arg).__doc__
if doc:
self.stdout.write("%s\n"%str(doc))
return
except AttributeError:
pass
self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
return
func()
else:
names = self.get_names()
cmds_doc = []
cmds_undoc = []
help = {}
for name in names:
if name[:5] == 'help_':
help[name[5:]]=1
names.sort()
# There can be duplicates if routines are overridden
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
cmd=name[3:]
if cmd in help:
cmds_doc.append(cmd)
del help[cmd]
elif getattr(self, name).__doc__:
cmds_doc.append(cmd)
else:
cmds_undoc.append(cmd)
self.stdout.write("%s\n"%str(self.doc_leader))
self.print_topics(self.doc_header, cmds_doc, 15,80)
self.print_topics(self.misc_header, help.keys(),15,80)
self.print_topics(self.undoc_header, cmds_undoc, 15,80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("%s\n"%str(self.ruler * len(header)))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
def columnize(self, list, displaywidth=80):
"""Display a list of strings as a compact set of columns.
Each column is only as wide as necessary.
Columns are separated by two spaces (one was not legible enough).
"""
if not list:
self.stdout.write("<empty>\n")
return
nonstrings = [i for i in range(len(list))
if not isinstance(list[i], str)]
if nonstrings:
raise TypeError, ("list[i] not a string for i in %s" %
", ".join(map(str, nonstrings)))
size = len(list)
if size == 1:
self.stdout.write('%s\n'%str(list[0]))
return
# Try every row count from 1 upwards
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > displaywidth:
break
if totwidth <= displaywidth:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
for row in range(nrows):
texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
else:
x = list[i]
texts.append(x)
while texts and not texts[-1]:
del texts[-1]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
self.stdout.write("%s\n"%str(" ".join(texts)))

View File

@ -1,307 +0,0 @@
"""Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
self.compile = CommandCompiler()
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, sys.last_traceback = sys.exc_info()
sys.last_type = type
sys.last_value = value
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value
except:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
list = traceback.format_exception_only(type, value)
map(self.write, list)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
try:
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(type, value)
finally:
tblist = tb = None
map(self.write, list)
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None):
"""Closely emulate the interactive Python console.
The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
else:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is True if more input is required, False if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
raw_input(); a subclass may replace this with a different
implementation.
"""
return raw_input(prompt)
def interact(banner=None, readfunc=None, local=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner)
if __name__ == '__main__':
import pdb
pdb.run("interact()\n")

File diff suppressed because it is too large

View File

@@ -1,134 +0,0 @@
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), that take program text, a filename and a 'mode'
and:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
# import internals, not guaranteed interface
from org.python.core import Py,CompilerFlags,CompileMode
from org.python.core.CompilerFlags import PyCF_DONT_IMPLY_DEDENT
# public interface
__all__ = ["compile_command", "Compile", "CommandCompiler"]
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
if symbol not in ['single','eval']:
raise ValueError,"symbol arg must be either single or eval"
symbol = CompileMode.getMode(symbol)
return Py.compile_command_flags(source,filename,symbol,Py.getCompilerFlags(),0)
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self._cflags = CompilerFlags()
def __call__(self, source, filename, symbol):
symbol = CompileMode.getMode(symbol)
return Py.compile_flags(source, filename, symbol, self._cflags)
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
def __init__(self,):
self._cflags = CompilerFlags()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
if symbol not in ['single','eval']:
raise ValueError,"symbol arg must be either single or eval"
symbol = CompileMode.getMode(symbol)
return Py.compile_command_flags(source,filename,symbol,self._cflags,0)
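# --- Illustrative sketch (an addition; not part of the original file),
# assuming the Jython implementation honors the documented contract of
# compile_command(): a code object, None, or a SyntaxError.
def _demo_compile_command():
    assert compile_command("x = 1") is not None    # complete: a code object
    assert compile_command("if x:") is None        # incomplete: returns None
    try:
        compile_command("x +")                     # malformed: raises
    except SyntaxError:
        pass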

View File

@@ -1,135 +0,0 @@
from _collections import defaultdict, deque
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
__all__ = ['defaultdict', 'deque', 'namedtuple']
def namedtuple(typename, field_names, verbose=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(field_names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(cls, %(argtxt)s):
return tuple.__new__(cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = self._make(map(kwds.pop, %(field_names)r, self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = property(itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace
namespace = dict(itemgetter=_itemgetter)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals['__name__']
return result
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3,4), Point(14,5), Point(9./7,6):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())

View File

@@ -1,126 +0,0 @@
"""Conversion functions between RGB and other color systems.
This modules provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0: r = 0.0
if g < 0.0: g = 0.0
if b < 0.0: b = 0.0
if r > 1.0: r = 1.0
if g > 1.0: g = 1.0
if b > 1.0: b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc: return 0.0, l, 0.0
if l <= 0.5: s = (maxc-minc) / (maxc+minc)
else: s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0: return l, l, l
if l <= 0.5: m2 = l * (1.0+s)
else: m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
if hue < 0.5: return m2
if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc: return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc: h = bc-gc
elif g == maxc: h = 2.0+rc-bc
else: h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0: return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
if i%6 == 0: return v, t, p
if i == 1: return q, v, p
if i == 2: return p, v, t
if i == 3: return p, q, v
if i == 4: return t, p, v
if i == 5: return v, p, q
# Cannot get here
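# --- Illustrative round-trip check (an addition; not in the original
# module): for in-range inputs, converting RGB to HSV and back should
# reproduce the original triple up to floating-point noise.
def _demo_roundtrip(r=0.2, g=0.4, b=0.4):
    h, s, v = rgb_to_hsv(r, g, b)
    r2, g2, b2 = hsv_to_rgb(h, s, v)
    assert max(abs(r - r2), abs(g - g2), abs(b - b2)) < 1e-9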

View File

@@ -1,84 +0,0 @@
"""Execute shell commands via os.popen() and return status, output.
Interface summary:
import commands
outtext = commands.getoutput(cmd)
(exitstatus, outtext) = commands.getstatusoutput(cmd)
outtext = commands.getstatus(file) # returns output of "ls -ld file"
A trailing newline is removed from the output string.
Encapsulates the basic operation:
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
[Note: it would be nice to add functions to interpret the exit status.]
"""
__all__ = ["getstatusoutput","getoutput","getstatus"]
# Module 'commands'
#
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for UNIX.
# Get 'ls -l' status for an object into a string
#
def getstatus(file):
"""Return output of "ls -ld <file>" in a string."""
return getoutput('ls -ld' + mkarg(file))
# Get the output from a shell command into a string.
# The exit status is ignored; a trailing newline is stripped.
# Assume the command will work with '{ ... ; } 2>&1' around it.
#
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell."""
return getstatusoutput(cmd)[1]
# Ditto but preserving the exit status.
# Returns a pair (sts, output)
#
def getstatusoutput(cmd):
"""Return (status, output) of executing cmd in a shell."""
import os
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts = 0
if text[-1:] == '\n': text = text[:-1]
return sts, text
# Make command argument from directory and pathname (prefix space, add quotes).
#
def mk2arg(head, x):
import os
return mkarg(os.path.join(head, x))
# Make a shell command argument from a string.
# Return a string beginning with a space followed by a shell-quoted
# version of the argument.
# Two strategies: enclose in single quotes if it contains none;
# otherwise, enclose in double quotes and prefix quotable characters
# with backslash.
#
def mkarg(x):
if '\'' not in x:
return ' \'' + x + '\''
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
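# --- Illustrative usage (an addition; not in the original module). On a
# UNIX system with a Bourne-style shell, getstatusoutput() returns the
# exit status and the combined stdout/stderr text, trailing newline stripped.
def _demo_getstatusoutput():
    sts, text = getstatusoutput('echo hello')
    assert sts == 0 and text == 'hello'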

View File

@@ -1,157 +0,0 @@
"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
__all__ = ["compile_dir","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
force=0, rx=None, quiet=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: if given, purported directory name (this is the
directory name that will show up in error messages)
force: if 1, force compilation, even if timestamps are up-to-date
rx: if given, files whose full path matches this compiled regular
expression are skipped
quiet: if 1, be quiet during compilation
"""
if not quiet:
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
continue
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
cfile = fullname + (__debug__ and 'c' or 'o')
ftime = os.stat(fullname).st_mtime
try: ctime = os.stat(cfile).st_mtime
except os.error: ctime = 0
if (ctime > ftime) and not force: continue
if not quiet:
print 'Compiling', fullname, '...'
try:
ok = py_compile.compile(fullname, None, dfile, True)
except KeyboardInterrupt:
raise KeyboardInterrupt
except py_compile.PyCompileError,err:
if quiet:
print 'Compiling', fullname, '...'
print err.msg
success = 0
except IOError, e:
print "Sorry", e
success = 0
else:
if ok == 0:
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, quiet):
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
quiet: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet)
return success
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:')
except getopt.error, msg:
print msg
print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
"[-x regexp] [directory ...]"
print "-l: don't recurse down"
print "-f: force rebuild even if timestamps are up-to-date"
print "-q: quiet operation"
print "-d destdir: purported directory name for error messages"
print " if no directory arguments, -l sys.path is assumed"
print "-x regexp: skip files matching the regular expression regexp"
print " the regexp is search for in the full path of the file"
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
quiet = 0
rx = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if o == '-q': quiet = 1
if o == '-x':
import re
rx = re.compile(a)
if ddir:
if len(args) != 1:
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args:
for dir in args:
if not compile_dir(dir, maxlevels, ddir,
force, rx, quiet):
success = 0
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupt]"
success = 0
return success
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
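# --- Illustrative usage sketch (an addition; not in the original module),
# roughly equivalent to "python compileall.py -q /tmp/myproject" from the
# shell; the path used here is a hypothetical example.
def _demo_compile_dir(path='/tmp/myproject'):
    # Returns 1 if every module compiled or was already up to date, else 0.
    return compile_dir(path, quiet=1)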

View File

@@ -1,26 +0,0 @@
"""Package for parsing and compiling Python source code
There are several functions defined at the top level that are imported
from modules contained in the package.
parse(buf, mode="exec") -> AST
Converts a string containing Python source code to an abstract
syntax tree (AST). The AST is defined in compiler.ast.
parseFile(path) -> AST
The same as parse(open(path).read())
walk(ast, visitor, verbose=None)
Does a pre-order walk over the ast using the visitor instance.
See compiler.visitor for details.
compile(source, filename, mode, flags=None, dont_inherit=None)
Returns a code object. A replacement for the builtin compile() function.
compileFile(filename)
Generates a .pyc file by compiling filename.
"""
from compiler.transformer import parse, parseFile
from compiler.visitor import walk
from compiler.pycodegen import compile, compileFile
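# --- Illustrative usage sketch (an addition; not part of the original
# package): parse a snippet into an AST, compile it with the pure-Python
# code generator, and execute the result.
def _demo():
    from compiler import ast
    tree = parse("x = 1 + 2")
    assert isinstance(tree, ast.Module)
    code = compile("x = 1 + 2", "<demo>", "exec")
    ns = {}
    exec code in ns
    assert ns['x'] == 3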

File diff suppressed because it is too large

View File

@@ -1,21 +0,0 @@
# operation flags
OP_ASSIGN = 'OP_ASSIGN'
OP_DELETE = 'OP_DELETE'
OP_APPLY = 'OP_APPLY'
SC_LOCAL = 1
SC_GLOBAL = 2
SC_FREE = 3
SC_CELL = 4
SC_UNKNOWN = 5
CO_OPTIMIZED = 0x0001
CO_NEWLOCALS = 0x0002
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010
CO_GENERATOR = 0x0020
CO_GENERATOR_ALLOWED = 0
CO_FUTURE_DIVISION = 0x2000
CO_FUTURE_ABSIMPORT = 0x4000
CO_FUTURE_WITH_STATEMENT = 0x8000

View File

@@ -1,73 +0,0 @@
"""Parser for future statements
"""
from compiler import ast, walk
def is_future(stmt):
"""Return true if statement is a well-formed future statement"""
if not isinstance(stmt, ast.From):
return 0
if stmt.modname == "__future__":
return 1
else:
return 0
class FutureParser:
features = ("nested_scopes", "generators", "division",
"absolute_import", "with_statement")
def __init__(self):
self.found = {} # set
def visitModule(self, node):
stmt = node.node
for s in stmt.nodes:
if not self.check_stmt(s):
break
def check_stmt(self, stmt):
if is_future(stmt):
for name, asname in stmt.names:
if name in self.features:
self.found[name] = 1
else:
raise SyntaxError, \
"future feature %s is not defined" % name
stmt.valid_future = 1
return 1
return 0
def get_features(self):
"""Return list of features enabled by future statements"""
return self.found.keys()
class BadFutureParser:
"""Check for invalid future statements"""
def visitFrom(self, node):
if hasattr(node, 'valid_future'):
return
if node.modname != "__future__":
return
raise SyntaxError, "invalid future statement " + repr(node)
def find_futures(node):
p1 = FutureParser()
p2 = BadFutureParser()
walk(node, p1)
walk(node, p2)
return p1.get_features()
if __name__ == "__main__":
import sys
from compiler import parseFile, walk
for file in sys.argv[1:]:
print file
tree = parseFile(file)
v = FutureParser()
walk(tree, v)
print v.found
print
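# --- Illustrative usage (an addition; not in the original module):
# find_futures() reports the __future__ features enabled at the top of
# a parsed module.
def _demo_find_futures():
    from compiler import parse
    tree = parse("from __future__ import division\nx = 1/2\n")
    return find_futures(tree)   # ['division']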

View File

@@ -1,73 +0,0 @@
def flatten(tup):
elts = []
for elt in tup:
if isinstance(elt, tuple):
elts = elts + flatten(elt)
else:
elts.append(elt)
return elts
class Set:
def __init__(self):
self.elts = {}
def __len__(self):
return len(self.elts)
def __contains__(self, elt):
return self.elts.has_key(elt)
def add(self, elt):
self.elts[elt] = elt
def elements(self):
return self.elts.keys()
def has_elt(self, elt):
return self.elts.has_key(elt)
def remove(self, elt):
del self.elts[elt]
def copy(self):
c = Set()
c.elts.update(self.elts)
return c
class Stack:
def __init__(self):
self.stack = []
self.pop = self.stack.pop
def __len__(self):
return len(self.stack)
def push(self, elt):
self.stack.append(elt)
def top(self):
return self.stack[-1]
def __getitem__(self, index): # needed by visitContinue()
return self.stack[index]
MANGLE_LEN = 256 # magic constant from compile.c
def mangle(name, klass):
if not name.startswith('__'):
return name
if len(name) + 2 >= MANGLE_LEN:
return name
if name.endswith('__'):
return name
try:
i = 0
while klass[i] == '_':
i = i + 1
except IndexError:
return name
klass = klass[i:]
tlen = len(klass) + len(name)
if tlen > MANGLE_LEN:
klass = klass[:MANGLE_LEN-tlen]
return "_%s%s" % (klass, name)
def set_filename(filename, tree):
"""Set the filename attribute to filename on every node in tree"""
worklist = [tree]
while worklist:
node = worklist.pop(0)
node.filename = filename
worklist.extend(node.getChildNodes())
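# --- Illustrative examples of the private-name mangling rule (an
# addition; not in the original module), mirroring what compile.c does.
def _demo_mangle():
    assert mangle('__spam', 'Ham') == '_Ham__spam'    # the usual case
    assert mangle('__spam__', 'Ham') == '__spam__'    # dunder names untouched
    assert mangle('spam', 'Ham') == 'spam'            # no leading __: untouched
    assert mangle('__spam', '__Ham') == '_Ham__spam'  # class's leading _ stripped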

View File

@@ -1,818 +0,0 @@
"""A flow graph representation for Python bytecode"""
import dis
import new
import sys
from compiler import misc
from compiler.consts \
import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
class FlowGraph:
def __init__(self):
self.current = self.entry = Block()
self.exit = Block("exit")
self.blocks = misc.Set()
self.blocks.add(self.entry)
self.blocks.add(self.exit)
def startBlock(self, block):
if self._debug:
if self.current:
print "end", repr(self.current)
print " next", self.current.next
print " ", self.current.get_children()
print repr(block)
self.current = block
def nextBlock(self, block=None):
# XXX think we need to specify when there is implicit transfer
# from one block to the next. might be better to represent this
# with explicit JUMP_ABSOLUTE instructions that are optimized
# out when they are unnecessary.
#
# I think this strategy works: each block has a child
# designated as "next" which is returned as the last of the
# children. because the nodes in a graph are emitted in
# reverse post order, the "next" block will always be emitted
# immediately after its parent.
# Worry: maintaining this invariant could be tricky
if block is None:
block = self.newBlock()
# Note: If the current block ends with an unconditional
# control transfer, then it is incorrect to add an implicit
# transfer to the block graph. The current code requires
# these edges to get the blocks emitted in the right order,
# however. :-( If a client needs to remove these edges, call
# pruneEdges().
self.current.addNext(block)
self.startBlock(block)
def newBlock(self):
b = Block()
self.blocks.add(b)
return b
def startExitBlock(self):
self.startBlock(self.exit)
_debug = 0
def _enable_debug(self):
self._debug = 1
def _disable_debug(self):
self._debug = 0
def emit(self, *inst):
if self._debug:
print "\t", inst
if inst[0] in ['RETURN_VALUE', 'YIELD_VALUE']:
self.current.addOutEdge(self.exit)
if len(inst) == 2 and isinstance(inst[1], Block):
self.current.addOutEdge(inst[1])
self.current.emit(inst)
def getBlocksInOrder(self):
"""Return the blocks in reverse postorder
i.e. each node appears before all of its successors
"""
# XXX make sure every node that doesn't have an explicit next
# is set so that next points to exit
for b in self.blocks.elements():
if b is self.exit:
continue
if not b.next:
b.addNext(self.exit)
order = dfs_postorder(self.entry, {})
order.reverse()
self.fixupOrder(order, self.exit)
# hack alert
if not self.exit in order:
order.append(self.exit)
return order
def fixupOrder(self, blocks, default_next):
"""Fixup bad order introduced by DFS."""
# XXX This is a total mess. There must be a better way to get
# the code blocks in the right order.
self.fixupOrderHonorNext(blocks, default_next)
self.fixupOrderForward(blocks, default_next)
def fixupOrderHonorNext(self, blocks, default_next):
"""Fix one problem with DFS.
The DFS uses child block, but doesn't know about the special
"next" block. As a result, the DFS can order blocks so that a
block isn't next to the right block for implicit control
transfers.
"""
index = {}
for i in range(len(blocks)):
index[blocks[i]] = i
for i in range(0, len(blocks) - 1):
b = blocks[i]
n = blocks[i + 1]
if not b.next or b.next[0] == default_next or b.next[0] == n:
continue
# The blocks are in the wrong order. Find the chain of
# blocks to insert where they belong.
cur = b
chain = []
elt = cur
while elt.next and elt.next[0] != default_next:
chain.append(elt.next[0])
elt = elt.next[0]
# Now remove the blocks in the chain from the current
# block list, so that they can be re-inserted.
l = []
for b in chain:
assert index[b] > i
l.append((index[b], b))
l.sort()
l.reverse()
for j, b in l:
del blocks[index[b]]
# Insert the chain in the proper location
blocks[i:i + 1] = [cur] + chain
# Finally, re-compute the block indexes
for i in range(len(blocks)):
index[blocks[i]] = i
def fixupOrderForward(self, blocks, default_next):
"""Make sure all JUMP_FORWARDs jump forward"""
index = {}
chains = []
cur = []
for b in blocks:
index[b] = len(chains)
cur.append(b)
if b.next and b.next[0] == default_next:
chains.append(cur)
cur = []
chains.append(cur)
while 1:
constraints = []
for i in range(len(chains)):
l = chains[i]
for b in l:
for c in b.get_children():
if index[c] < i:
forward_p = 0
for inst in b.insts:
if inst[0] == 'JUMP_FORWARD':
if inst[1] == c:
forward_p = 1
if not forward_p:
continue
constraints.append((index[c], i))
if not constraints:
break
# XXX just do one for now
# do swaps to get things in the right order
goes_before, a_chain = constraints[0]
assert a_chain > goes_before
c = chains[a_chain]
chains.remove(c)
chains.insert(goes_before, c)
del blocks[:]
for c in chains:
for b in c:
blocks.append(b)
def getBlocks(self):
return self.blocks.elements()
def getRoot(self):
"""Return nodes appropriate for use with dominator"""
return self.entry
def getContainedGraphs(self):
l = []
for b in self.getBlocks():
l.extend(b.getContainedGraphs())
return l
def dfs_postorder(b, seen):
"""Depth-first search of tree rooted at b, return in postorder"""
order = []
seen[b] = b
for c in b.get_children():
if seen.has_key(c):
continue
order = order + dfs_postorder(c, seen)
order.append(b)
return order
class Block:
_count = 0
def __init__(self, label=''):
self.insts = []
self.inEdges = misc.Set()
self.outEdges = misc.Set()
self.label = label
self.bid = Block._count
self.next = []
Block._count = Block._count + 1
def __repr__(self):
if self.label:
return "<block %s id=%d>" % (self.label, self.bid)
else:
return "<block id=%d>" % (self.bid)
def __str__(self):
insts = map(str, self.insts)
return "<block %s %d:\n%s>" % (self.label, self.bid,
'\n'.join(insts))
def emit(self, inst):
op = inst[0]
if op[:4] == 'JUMP':
self.outEdges.add(inst[1])
self.insts.append(inst)
def getInstructions(self):
return self.insts
def addInEdge(self, block):
self.inEdges.add(block)
def addOutEdge(self, block):
self.outEdges.add(block)
def addNext(self, block):
self.next.append(block)
assert len(self.next) == 1, map(str, self.next)
_uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS', 'YIELD_VALUE',
'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
def pruneNext(self):
"""Remove bogus edge for unconditional transfers
Each block has a next edge that accounts for implicit control
transfers, e.g. from a JUMP_IF_FALSE to the block that will be
executed if the test is true.
These edges must remain for the current assembler code to
work. If they are removed, the dfs_postorder gets things in
weird orders. However, they shouldn't be there for other
purposes, e.g. conversion to SSA form. This method will
remove the next edge when it follows an unconditional control
transfer.
"""
try:
op, arg = self.insts[-1]
except (IndexError, ValueError):
return
if op in self._uncond_transfer:
self.next = []
def get_children(self):
if self.next and self.next[0] in self.outEdges:
self.outEdges.remove(self.next[0])
return self.outEdges.elements() + self.next
def getContainedGraphs(self):
"""Return all graphs contained within this block.
For example, a MAKE_FUNCTION block will contain a reference to
the graph for the function body.
"""
contained = []
for inst in self.insts:
if len(inst) == 1:
continue
op = inst[1]
if hasattr(op, 'graph'):
contained.append(op.graph)
return contained
# flags for code objects
# the FlowGraph is transformed in place; it exists in one of these states
RAW = "RAW"
FLAT = "FLAT"
CONV = "CONV"
DONE = "DONE"
class PyFlowGraph(FlowGraph):
super_init = FlowGraph.__init__
def __init__(self, name, filename, args=(), optimized=0, klass=None):
self.super_init()
self.name = name
self.filename = filename
self.docstring = None
self.args = args # XXX
self.argcount = getArgCount(args)
self.klass = klass
if optimized:
self.flags = CO_OPTIMIZED | CO_NEWLOCALS
else:
self.flags = 0
self.consts = []
self.names = []
# Free variables found by the symbol table scan, including
# variables used only in nested scopes, are included here.
self.freevars = []
self.cellvars = []
# The closure list is used to track the order of cell
# variables and free variables in the resulting code object.
# The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
# kinds of variables.
self.closure = []
self.varnames = list(args) or []
for i in range(len(self.varnames)):
var = self.varnames[i]
if isinstance(var, TupleArg):
self.varnames[i] = var.getName()
self.stage = RAW
def setDocstring(self, doc):
self.docstring = doc
def setFlag(self, flag):
self.flags = self.flags | flag
if flag == CO_VARARGS:
self.argcount = self.argcount - 1
def checkFlag(self, flag):
if self.flags & flag:
return 1
def setFreeVars(self, names):
self.freevars = list(names)
def setCellVars(self, names):
self.cellvars = names
def getCode(self):
"""Get a Python code object"""
assert self.stage == RAW
self.computeStackDepth()
self.flattenGraph()
assert self.stage == FLAT
self.convertArgs()
assert self.stage == CONV
self.makeByteCode()
assert self.stage == DONE
return self.newCodeObject()
def dump(self, io=None):
if io:
save = sys.stdout
sys.stdout = io
pc = 0
for t in self.insts:
opname = t[0]
if opname == "SET_LINENO":
print
if len(t) == 1:
print "\t", "%3d" % pc, opname
pc = pc + 1
else:
print "\t", "%3d" % pc, opname, t[1]
pc = pc + 3
if io:
sys.stdout = save
def computeStackDepth(self):
"""Compute the max stack depth.
Approach is to compute the stack effect of each basic block.
Then find the path through the code with the largest total
effect.
"""
depth = {}
exit = None
for b in self.getBlocks():
depth[b] = findDepth(b.getInstructions())
seen = {}
def max_depth(b, d):
if seen.has_key(b):
return d
seen[b] = 1
d = d + depth[b]
children = b.get_children()
if children:
return max([max_depth(c, d) for c in children])
else:
if not b.label == "exit":
return max_depth(self.exit, d)
else:
return d
self.stacksize = max_depth(self.entry, 0)
def flattenGraph(self):
"""Arrange the blocks in order and resolve jumps"""
assert self.stage == RAW
self.insts = insts = []
pc = 0
begin = {}
end = {}
for b in self.getBlocksInOrder():
begin[b] = pc
for inst in b.getInstructions():
insts.append(inst)
if len(inst) == 1:
pc = pc + 1
elif inst[0] != "SET_LINENO":
# arg takes 2 bytes
pc = pc + 3
end[b] = pc
pc = 0
for i in range(len(insts)):
inst = insts[i]
if len(inst) == 1:
pc = pc + 1
elif inst[0] != "SET_LINENO":
pc = pc + 3
opname = inst[0]
if self.hasjrel.has_elt(opname):
oparg = inst[1]
offset = begin[oparg] - pc
insts[i] = opname, offset
elif self.hasjabs.has_elt(opname):
insts[i] = opname, begin[inst[1]]
self.stage = FLAT
hasjrel = misc.Set()
for i in dis.hasjrel:
hasjrel.add(dis.opname[i])
hasjabs = misc.Set()
for i in dis.hasjabs:
hasjabs.add(dis.opname[i])
def convertArgs(self):
"""Convert arguments from symbolic to concrete form"""
assert self.stage == FLAT
self.consts.insert(0, self.docstring)
self.sort_cellvars()
for i in range(len(self.insts)):
t = self.insts[i]
if len(t) == 2:
opname, oparg = t
conv = self._converters.get(opname, None)
if conv:
self.insts[i] = opname, conv(self, oparg)
self.stage = CONV
def sort_cellvars(self):
"""Sort cellvars in the order of varnames and prune from freevars.
"""
cells = {}
for name in self.cellvars:
cells[name] = 1
self.cellvars = [name for name in self.varnames
if cells.has_key(name)]
for name in self.cellvars:
del cells[name]
self.cellvars = self.cellvars + cells.keys()
self.closure = self.cellvars + self.freevars
def _lookupName(self, name, list):
"""Return index of name in list, appending if necessary
This routine uses a list instead of a dictionary, because a
dictionary can't store two different keys if the keys have the
same value but different types, e.g. 2 and 2L. The compiler
must treat these two separately, so it does an explicit type
comparison before comparing the values.
"""
t = type(name)
for i in range(len(list)):
if t == type(list[i]) and list[i] == name:
return i
end = len(list)
list.append(name)
return end
_converters = {}
def _convert_LOAD_CONST(self, arg):
if hasattr(arg, 'getCode'):
arg = arg.getCode()
return self._lookupName(arg, self.consts)
def _convert_LOAD_FAST(self, arg):
self._lookupName(arg, self.names)
return self._lookupName(arg, self.varnames)
_convert_STORE_FAST = _convert_LOAD_FAST
_convert_DELETE_FAST = _convert_LOAD_FAST
def _convert_LOAD_NAME(self, arg):
if self.klass is None:
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.names)
def _convert_NAME(self, arg):
if self.klass is None:
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.names)
_convert_STORE_NAME = _convert_NAME
_convert_DELETE_NAME = _convert_NAME
_convert_IMPORT_NAME = _convert_NAME
_convert_IMPORT_FROM = _convert_NAME
_convert_STORE_ATTR = _convert_NAME
_convert_LOAD_ATTR = _convert_NAME
_convert_DELETE_ATTR = _convert_NAME
_convert_LOAD_GLOBAL = _convert_NAME
_convert_STORE_GLOBAL = _convert_NAME
_convert_DELETE_GLOBAL = _convert_NAME
def _convert_DEREF(self, arg):
self._lookupName(arg, self.names)
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.closure)
_convert_LOAD_DEREF = _convert_DEREF
_convert_STORE_DEREF = _convert_DEREF
def _convert_LOAD_CLOSURE(self, arg):
self._lookupName(arg, self.varnames)
return self._lookupName(arg, self.closure)
_cmp = list(dis.cmp_op)
def _convert_COMPARE_OP(self, arg):
return self._cmp.index(arg)
# similarly for other opcodes...
for name, obj in locals().items():
if name[:9] == "_convert_":
opname = name[9:]
_converters[opname] = obj
del name, obj, opname
def makeByteCode(self):
assert self.stage == CONV
self.lnotab = lnotab = LineAddrTable()
for t in self.insts:
opname = t[0]
if len(t) == 1:
lnotab.addCode(self.opnum[opname])
else:
oparg = t[1]
if opname == "SET_LINENO":
lnotab.nextLine(oparg)
continue
hi, lo = twobyte(oparg)
try:
lnotab.addCode(self.opnum[opname], lo, hi)
except ValueError:
print opname, oparg
print self.opnum[opname], lo, hi
raise
self.stage = DONE
opnum = {}
for num in range(len(dis.opname)):
opnum[dis.opname[num]] = num
del num
def newCodeObject(self):
assert self.stage == DONE
if (self.flags & CO_NEWLOCALS) == 0:
nlocals = 0
else:
nlocals = len(self.varnames)
argcount = self.argcount
if self.flags & CO_VARKEYWORDS:
argcount = argcount - 1
return new.code(argcount, nlocals, self.stacksize, self.flags,
self.lnotab.getCode(), self.getConsts(),
tuple(self.names), tuple(self.varnames),
self.filename, self.name, self.lnotab.firstline,
self.lnotab.getTable(), tuple(self.freevars),
tuple(self.cellvars))
def getConsts(self):
"""Return a tuple for the const slot of the code object
Must convert references to code (MAKE_FUNCTION) to code
objects recursively.
"""
l = []
for elt in self.consts:
if isinstance(elt, PyFlowGraph):
elt = elt.getCode()
l.append(elt)
return tuple(l)
def isJump(opname):
if opname[:4] == 'JUMP':
return 1
class TupleArg:
"""Helper for marking func defs with nested tuples in arglist"""
def __init__(self, count, names):
self.count = count
self.names = names
def __repr__(self):
return "TupleArg(%s, %s)" % (self.count, self.names)
def getName(self):
return ".%d" % self.count
def getArgCount(args):
argcount = len(args)
if args:
for arg in args:
if isinstance(arg, TupleArg):
numNames = len(misc.flatten(arg.names))
argcount = argcount - numNames
return argcount
def twobyte(val):
"""Convert an int argument into high and low bytes"""
assert isinstance(val, int)
return divmod(val, 256)
class LineAddrTable:
"""lnotab
This class builds the lnotab, which is documented in compile.c.
Here's a brief recap:
For each SET_LINENO instruction after the first one, two bytes are
added to lnotab. (In some cases, multiple two-byte entries are
added.) The first byte is the distance in bytes between the
instruction for the last SET_LINENO and the current SET_LINENO.
The second byte is the offset in line numbers. If either offset is
greater than 255, multiple two-byte entries are added -- see
compile.c for the delicate details.
"""
def __init__(self):
self.code = []
self.codeOffset = 0
self.firstline = 0
self.lastline = 0
self.lastoff = 0
self.lnotab = []
def addCode(self, *args):
for arg in args:
self.code.append(chr(arg))
self.codeOffset = self.codeOffset + len(args)
def nextLine(self, lineno):
if self.firstline == 0:
self.firstline = lineno
self.lastline = lineno
else:
# compute deltas
addr = self.codeOffset - self.lastoff
line = lineno - self.lastline
# Python assumes that lineno always increases with
# increasing bytecode address (lnotab is unsigned char).
# Depending on when SET_LINENO instructions are emitted
# this is not always true. Consider the code:
# a = (1,
# b)
# In the bytecode stream, the assignment to "a" occurs
# after the loading of "b". This works with the C Python
# compiler because it only generates a SET_LINENO instruction
# for the assignment.
if line >= 0:
push = self.lnotab.append
while addr > 255:
push(255); push(0)
addr -= 255
while line > 255:
push(addr); push(255)
line -= 255
addr = 0
if addr > 0 or line > 0:
push(addr); push(line)
self.lastline = lineno
self.lastoff = self.codeOffset
def getCode(self):
return ''.join(self.code)
def getTable(self):
return ''.join(map(chr, self.lnotab))
class StackDepthTracker:
# XXX 1. need to keep track of stack depth on jumps
# XXX 2. at least partly as a result, this code is broken
def findDepth(self, insts, debug=0):
depth = 0
maxDepth = 0
for i in insts:
opname = i[0]
if debug:
print i,
delta = self.effect.get(opname, None)
if delta is not None:
depth = depth + delta
else:
# now check patterns
for pat, pat_delta in self.patterns:
if opname[:len(pat)] == pat:
delta = pat_delta
depth = depth + delta
break
# if we still haven't found a match
if delta is None:
meth = getattr(self, opname, None)
if meth is not None:
depth = depth + meth(i[1])
if depth > maxDepth:
maxDepth = depth
if debug:
print depth, maxDepth
return maxDepth
effect = {
'POP_TOP': -1,
'DUP_TOP': 1,
'LIST_APPEND': -2,
'SLICE+1': -1,
'SLICE+2': -1,
'SLICE+3': -2,
'STORE_SLICE+0': -1,
'STORE_SLICE+1': -2,
'STORE_SLICE+2': -2,
'STORE_SLICE+3': -3,
'DELETE_SLICE+0': -1,
'DELETE_SLICE+1': -2,
'DELETE_SLICE+2': -2,
'DELETE_SLICE+3': -3,
'STORE_SUBSCR': -3,
'DELETE_SUBSCR': -2,
# PRINT_EXPR?
'PRINT_ITEM': -1,
'RETURN_VALUE': -1,
'YIELD_VALUE': -1,
'EXEC_STMT': -3,
'BUILD_CLASS': -2,
'STORE_NAME': -1,
'STORE_ATTR': -2,
'DELETE_ATTR': -1,
'STORE_GLOBAL': -1,
'BUILD_MAP': 1,
'COMPARE_OP': -1,
'STORE_FAST': -1,
'IMPORT_STAR': -1,
'IMPORT_NAME': -1,
'IMPORT_FROM': 1,
'LOAD_ATTR': 0, # unlike other loads
# close enough...
'SETUP_EXCEPT': 3,
'SETUP_FINALLY': 3,
'FOR_ITER': 1,
'WITH_CLEANUP': -1,
}
# use pattern match
patterns = [
('BINARY_', -1),
('LOAD_', 1),
]
def UNPACK_SEQUENCE(self, count):
return count-1
def BUILD_TUPLE(self, count):
return -count+1
def BUILD_LIST(self, count):
return -count+1
def CALL_FUNCTION(self, argc):
hi, lo = divmod(argc, 256)
return -(lo + hi * 2)
def CALL_FUNCTION_VAR(self, argc):
return self.CALL_FUNCTION(argc)-1
def CALL_FUNCTION_KW(self, argc):
return self.CALL_FUNCTION(argc)-1
def CALL_FUNCTION_VAR_KW(self, argc):
return self.CALL_FUNCTION(argc)-2
def MAKE_FUNCTION(self, argc):
return -argc
def MAKE_CLOSURE(self, argc):
# XXX need to account for free variables too!
return -argc
def BUILD_SLICE(self, argc):
if argc == 2:
return -1
elif argc == 3:
return -2
def DUP_TOPX(self, argc):
return argc
findDepth = StackDepthTracker().findDepth
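# --- Illustrative check (an addition; not in the original module):
# twobyte() splits a 16-bit oparg into the high and low bytes that
# follow an opcode in the bytecode stream (emitted low byte first).
def _demo_twobyte():
    assert twobyte(0) == (0, 0)
    assert twobyte(300) == (1, 44)   # 300 == 1*256 + 44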

View File

@@ -1,463 +0,0 @@
"""Module symbol-table generator"""
from compiler import ast
from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
from compiler.misc import mangle
import types
import sys
MANGLE_LEN = 256
class Scope:
# XXX how much information do I need about each name?
def __init__(self, name, module, klass=None):
self.name = name
self.module = module
self.defs = {}
self.uses = {}
self.globals = {}
self.params = {}
self.frees = {}
self.cells = {}
self.children = []
# nested is true if the class could contain free variables,
# i.e. if it is nested within another function.
self.nested = None
self.generator = None
self.klass = None
if klass is not None:
for i in range(len(klass)):
if klass[i] != '_':
self.klass = klass[i:]
break
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
def mangle(self, name):
if self.klass is None:
return name
return mangle(name, self.klass)
def add_def(self, name):
self.defs[self.mangle(name)] = 1
def add_use(self, name):
self.uses[self.mangle(name)] = 1
def add_global(self, name):
name = self.mangle(name)
if self.uses.has_key(name) or self.defs.has_key(name):
pass # XXX warn about global following def/use
if self.params.has_key(name):
raise SyntaxError, "%s in %s is global and parameter" % \
(name, self.name)
self.globals[name] = 1
self.module.add_def(name)
def add_param(self, name):
name = self.mangle(name)
self.defs[name] = 1
self.params[name] = 1
def get_names(self):
d = {}
d.update(self.defs)
d.update(self.uses)
d.update(self.globals)
return d.keys()
def add_child(self, child):
self.children.append(child)
def get_children(self):
return self.children
def DEBUG(self):
print >> sys.stderr, self.name, self.nested and "nested" or ""
print >> sys.stderr, "\tglobals: ", self.globals
print >> sys.stderr, "\tcells: ", self.cells
print >> sys.stderr, "\tdefs: ", self.defs
print >> sys.stderr, "\tuses: ", self.uses
print >> sys.stderr, "\tfrees:", self.frees
def check_name(self, name):
"""Return scope of name.
The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
"""
if self.globals.has_key(name):
return SC_GLOBAL
if self.cells.has_key(name):
return SC_CELL
if self.defs.has_key(name):
return SC_LOCAL
if self.nested and (self.frees.has_key(name) or
self.uses.has_key(name)):
return SC_FREE
if self.nested:
return SC_UNKNOWN
else:
return SC_GLOBAL
def get_free_vars(self):
if not self.nested:
return ()
free = {}
free.update(self.frees)
for name in self.uses.keys():
if not (self.defs.has_key(name) or
self.globals.has_key(name)):
free[name] = 1
return free.keys()
def handle_children(self):
for child in self.children:
frees = child.get_free_vars()
globals = self.add_frees(frees)
for name in globals:
child.force_global(name)
def force_global(self, name):
"""Force name to be global in scope.
Some child of the current node had a free reference to name.
When the child was processed, it was labelled a free
variable. Now that all its enclosing scopes have been
processed, the name is known to be a global or builtin. So
walk back down the child chain and set the name to be global
rather than free.
Be careful to stop if a child does not think the name is
free.
"""
self.globals[name] = 1
if self.frees.has_key(name):
del self.frees[name]
for child in self.children:
if child.check_name(name) == SC_FREE:
child.force_global(name)
def add_frees(self, names):
"""Process list of free vars from nested scope.
Returns a list of names that are either 1) declared global in the
parent or 2) undefined in a top-level parent. In either case,
the nested scope should treat them as globals.
"""
child_globals = []
for name in names:
sc = self.check_name(name)
if self.nested:
if sc == SC_UNKNOWN or sc == SC_FREE \
or isinstance(self, ClassScope):
self.frees[name] = 1
elif sc == SC_GLOBAL:
child_globals.append(name)
elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
self.cells[name] = 1
elif sc != SC_CELL:
child_globals.append(name)
else:
if sc == SC_LOCAL:
self.cells[name] = 1
elif sc != SC_CELL:
child_globals.append(name)
return child_globals
def get_cell_vars(self):
return self.cells.keys()
class ModuleScope(Scope):
__super_init = Scope.__init__
def __init__(self):
self.__super_init("global", self)
class FunctionScope(Scope):
pass
class GenExprScope(Scope):
__super_init = Scope.__init__
__counter = 1
def __init__(self, module, klass=None):
i = self.__counter
self.__counter += 1
self.__super_init("generator expression<%d>"%i, module, klass)
self.add_param('.0')
def get_names(self):
keys = Scope.get_names(self)
return keys
class LambdaScope(FunctionScope):
__super_init = Scope.__init__
__counter = 1
def __init__(self, module, klass=None):
i = self.__counter
self.__counter += 1
self.__super_init("lambda.%d" % i, module, klass)
class ClassScope(Scope):
__super_init = Scope.__init__
def __init__(self, name, module):
self.__super_init(name, module, name)
class SymbolVisitor:
def __init__(self):
self.scopes = {}
self.klass = None
# node that define new scopes
def visitModule(self, node):
scope = self.module = self.scopes[node] = ModuleScope()
self.visit(node.node, scope)
visitExpression = visitModule
def visitFunction(self, node, parent):
if node.decorators:
self.visit(node.decorators, parent)
parent.add_def(node.name)
for n in node.defaults:
self.visit(n, parent)
scope = FunctionScope(node.name, self.module, self.klass)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
self.scopes[node] = scope
self._do_args(scope, node.argnames)
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def visitGenExpr(self, node, parent):
scope = GenExprScope(self.module, self.klass);
if parent.nested or isinstance(parent, FunctionScope) \
or isinstance(parent, GenExprScope):
scope.nested = 1
self.scopes[node] = scope
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def visitGenExprInner(self, node, scope):
for genfor in node.quals:
self.visit(genfor, scope)
self.visit(node.expr, scope)
def visitGenExprFor(self, node, scope):
self.visit(node.assign, scope, 1)
self.visit(node.iter, scope)
for if_ in node.ifs:
self.visit(if_, scope)
def visitGenExprIf(self, node, scope):
self.visit(node.test, scope)
def visitLambda(self, node, parent, assign=0):
# Lambda is an expression, so it could appear in an expression
# context where assign is passed. The transformer should catch
# any code that has a lambda on the left-hand side.
assert not assign
for n in node.defaults:
self.visit(n, parent)
scope = LambdaScope(self.module, self.klass)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
self.scopes[node] = scope
self._do_args(scope, node.argnames)
self.visit(node.code, scope)
self.handle_free_vars(scope, parent)
def _do_args(self, scope, args):
for name in args:
if type(name) == types.TupleType:
self._do_args(scope, name)
else:
scope.add_param(name)
def handle_free_vars(self, scope, parent):
parent.add_child(scope)
scope.handle_children()
def visitClass(self, node, parent):
parent.add_def(node.name)
for n in node.bases:
self.visit(n, parent)
scope = ClassScope(node.name, self.module)
if parent.nested or isinstance(parent, FunctionScope):
scope.nested = 1
if node.doc is not None:
scope.add_def('__doc__')
scope.add_def('__module__')
self.scopes[node] = scope
prev = self.klass
self.klass = node.name
self.visit(node.code, scope)
self.klass = prev
self.handle_free_vars(scope, parent)
# name can be a def or a use
# XXX a few calls and nodes expect a third "assign" arg that is
# true if the name is being used as an assignment. only
# expressions contained within statements may have the assign arg.
def visitName(self, node, scope, assign=0):
if assign:
scope.add_def(node.name)
else:
scope.add_use(node.name)
# operations that bind new names
def visitFor(self, node, scope):
self.visit(node.assign, scope, 1)
self.visit(node.list, scope)
self.visit(node.body, scope)
if node.else_:
self.visit(node.else_, scope)
def visitFrom(self, node, scope):
for name, asname in node.names:
if name == "*":
continue
scope.add_def(asname or name)
def visitImport(self, node, scope):
for name, asname in node.names:
i = name.find(".")
if i > -1:
name = name[:i]
scope.add_def(asname or name)
def visitGlobal(self, node, scope):
for name in node.names:
scope.add_global(name)
def visitAssign(self, node, scope):
"""Propagate assignment flag down to child nodes.
The Assign node doesn't itself contain the variables being
assigned to. Instead, the children in node.nodes are visited
with the assign flag set to true. When the names occur in
those nodes, they are marked as defs.
Some names that occur in an assignment target are not bound by
the assignment, e.g. a name occurring inside a slice. The
visitor handles these nodes specially; they do not propagate
the assign flag to their children.
"""
for n in node.nodes:
self.visit(n, scope, 1)
self.visit(node.expr, scope)
def visitAssName(self, node, scope, assign=1):
scope.add_def(node.name)
def visitAssAttr(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
def visitSubscript(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
for n in node.subs:
self.visit(n, scope, 0)
def visitSlice(self, node, scope, assign=0):
self.visit(node.expr, scope, 0)
if node.lower:
self.visit(node.lower, scope, 0)
if node.upper:
self.visit(node.upper, scope, 0)
def visitAugAssign(self, node, scope):
# If the LHS is a name, then this counts as assignment.
# Otherwise, it's just use.
self.visit(node.node, scope)
if isinstance(node.node, ast.Name):
self.visit(node.node, scope, 1) # XXX worry about this
self.visit(node.expr, scope)
# prune if statements if tests are false
_const_types = types.StringType, types.IntType, types.FloatType
def visitIf(self, node, scope):
for test, body in node.tests:
if isinstance(test, ast.Const):
if type(test.value) in self._const_types:
if not test.value:
continue
self.visit(test, scope)
self.visit(body, scope)
if node.else_:
self.visit(node.else_, scope)
# a yield statement signals a generator
def visitYield(self, node, scope):
scope.generator = 1
self.visit(node.value, scope)
def list_eq(l1, l2):
return sorted(l1) == sorted(l2)
if __name__ == "__main__":
import sys
from compiler import parseFile, walk
import symtable
def get_names(syms):
return [s for s in [s.get_name() for s in syms.get_symbols()]
if not (s.startswith('_[') or s.startswith('.'))]
for file in sys.argv[1:]:
print file
f = open(file)
buf = f.read()
f.close()
syms = symtable.symtable(buf, file, "exec")
mod_names = get_names(syms)
tree = parseFile(file)
s = SymbolVisitor()
walk(tree, s)
# compare module-level symbols
names2 = s.scopes[tree].get_names()
if not list_eq(mod_names, names2):
print
print "oops", file
print sorted(mod_names)
print sorted(names2)
sys.exit(-1)
d = {}
d.update(s.scopes)
del d[tree]
scopes = d.values()
del d
for s in syms.get_symbols():
if s.is_namespace():
l = [sc for sc in scopes
if sc.name == s.get_name()]
if len(l) > 1:
print "skipping", s.get_name()
else:
if not list_eq(get_names(s.get_namespace()),
l[0].get_names()):
print s.get_name()
print sorted(get_names(s.get_namespace()))
print sorted(l[0].get_names())
sys.exit(-1)
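# --- Illustrative usage sketch (an addition; not in the original module):
# build the scope table for a nested function, where 'x' is free in g.
def _demo_scopes():
    from compiler import parse, walk
    src = "def f():\n    x = 1\n    def g():\n        return x\n    return g\n"
    tree = parse(src)
    s = SymbolVisitor()
    walk(tree, s)
    return s.scopes   # maps AST nodes to Scope instances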

View File

@@ -1,46 +0,0 @@
"""Check for errs in the AST.
The Python parser does not catch all syntax errors. Others, like
assignments with invalid targets, are caught in the code generation
phase.
The compiler package catches some errors in the transformer module.
But it seems clearer to write checkers that use the AST to detect
errors.
"""
from compiler import ast, walk
def check(tree, multi=None):
v = SyntaxErrorChecker(multi)
walk(tree, v)
return v.errors
class SyntaxErrorChecker:
"""A visitor to find syntax errors in the AST."""
def __init__(self, multi=None):
"""Create new visitor object.
If optional argument multi is not None, then print messages
for each error rather than raising a SyntaxError for the
first.
"""
self.multi = multi
self.errors = 0
def error(self, node, msg):
self.errors = self.errors + 1
if self.multi is not None:
print "%s:%s: %s" % (node.filename, node.lineno, msg)
else:
raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
def visitAssign(self, node):
# the transformer module handles many of these
pass
## for target in node.nodes:
## if isinstance(target, ast.AssList):
## if target.lineno is None:
## target.lineno = node.lineno
## self.error(target, "can't assign to list comprehension")
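if __name__ == "__main__":
    # Hedged usage sketch (assumes the Python 2 'compiler' package is
    # importable): check() walks the tree and returns the error count.
    from compiler import parse
    print check(parse("x = 1"))   # 0 -- nothing flagged by this checker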

View File

@ -1,113 +0,0 @@
from compiler import ast
# XXX should probably rename ASTVisitor to ASTWalker
# XXX can it be made even more generic?
class ASTVisitor:
"""Performs a depth-first walk of the AST
The ASTVisitor will walk the AST, performing either a preorder or
postorder traversal depending on which method is called.
methods:
preorder(tree, visitor)
postorder(tree, visitor)
tree: an instance of ast.Node
visitor: an instance with visitXXX methods
The ASTVisitor is responsible for walking over the tree in the
correct order. For each node, it checks the visitor argument for
a method named 'visitNodeType' where NodeType is the name of the
node's class, e.g. Class. If the method exists, it is called
with the node as its sole argument.
The visitor method for a particular node type can control how
child nodes are visited during a preorder walk. (It can't control
the order during a postorder walk, because it is called _after_
the walk has occurred.) The ASTVisitor modifies the visitor
argument by adding a visit method to the visitor; this method can
be used to visit a child node of arbitrary type.
"""
VERBOSE = 0
def __init__(self):
self.node = None
self._cache = {}
def default(self, node, *args):
for child in node.getChildNodes():
self.dispatch(child, *args)
def dispatch(self, node, *args):
self.node = node
klass = node.__class__
meth = self._cache.get(klass, None)
if meth is None:
className = klass.__name__
meth = getattr(self.visitor, 'visit' + className, self.default)
self._cache[klass] = meth
## if self.VERBOSE > 0:
## className = klass.__name__
## if self.VERBOSE == 1:
## if meth == 0:
## print "dispatch", className
## else:
## print "dispatch", className, (meth and meth.__name__ or '')
return meth(node, *args)
def preorder(self, tree, visitor, *args):
"""Do preorder walk of tree using visitor"""
self.visitor = visitor
visitor.visit = self.dispatch
self.dispatch(tree, *args) # XXX *args make sense?
class ExampleASTVisitor(ASTVisitor):
"""Prints examples of the nodes that aren't visited
This visitor-driver is only useful for development, when it's
helpful to develop a visitor incrementally, and get feedback on what
you still have to do.
"""
examples = {}
def dispatch(self, node, *args):
self.node = node
meth = self._cache.get(node.__class__, None)
className = node.__class__.__name__
if meth is None:
meth = getattr(self.visitor, 'visit' + className, 0)
self._cache[node.__class__] = meth
if self.VERBOSE > 1:
print "dispatch", className, (meth and meth.__name__ or '')
if meth:
meth(node, *args)
elif self.VERBOSE > 0:
klass = node.__class__
if not self.examples.has_key(klass):
self.examples[klass] = klass
print
print self.visitor
print klass
for attr in dir(node):
if attr[0] != '_':
print "\t", "%-12.12s" % attr, getattr(node, attr)
print
return self.default(node, *args)
# XXX this is an API change
_walker = ASTVisitor
def walk(tree, visitor, walker=None, verbose=None):
if walker is None:
walker = _walker()
if verbose is not None:
walker.VERBOSE = verbose
walker.preorder(tree, visitor)
return walker.visitor
def dumpNode(node):
print node.__class__
for attr in dir(node):
if attr[0] != '_':
print "\t", "%-10.10s" % attr, getattr(node, attr)

View File

@ -1,158 +0,0 @@
"""Utilities for with-statement contexts. See PEP 343."""
import sys
__all__ = ["contextmanager", "nested", "closing"]
class GeneratorContextManager(object):
"""Helper for @contextmanager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
self.gen.next()
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration, exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
try:
helper.__name__ = func.__name__
helper.__doc__ = func.__doc__
helper.__dict__ = func.__dict__
except:
pass
return helper
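# A minimal sketch of the decorator above in action (under Python 2.5 the
# caller also needs "from __future__ import with_statement"; the tag() name
# is purely illustrative):
@contextmanager
def tag(name):
    print "<%s>" % name
    try:
        yield name
    finally:
        print "</%s>" % name
# with tag("h1"):
#     print "title"        # prints <h1>, title, </h1> in order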
@contextmanager
def nested(*managers):
"""Support multiple context managers in a single with-statement.
Code like this:
with nested(A, B, C) as (X, Y, Z):
<body>
is equivalent to this:
with A as X:
with B as Y:
with C as Z:
<body>
"""
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
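if __name__ == "__main__":
    # Hedged sketch of the protocol closing() implements: __enter__ hands
    # back the wrapped object, __exit__ calls its close() method.
    from StringIO import StringIO
    s = StringIO("data")
    cm = closing(s)
    print cm.__enter__().read()   # 'data'
    cm.__exit__()
    print s.closed                # True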

File diff suppressed because it is too large

View File

@ -1,414 +0,0 @@
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
from copy_reg import dispatch_table
class Error(Exception):
pass
error = Error # backward compatibility
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, long, float, bool, str, tuple,
frozenset, type, xrange, types.ClassType,
types.BuiltinFunctionType,
types.FunctionType):
d[t] = _copy_immutable
for name in ("ComplexType", "UnicodeType", "CodeType"):
t = getattr(types, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
def _copy_inst(x):
if hasattr(x, '__copy__'):
return x.__copy__()
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _copy_inst
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[long] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[str] = _deepcopy_atomic
try:
d[unicode] = _deepcopy_atomic
except NameError:
pass
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[xrange] = _deepcopy_atomic
d[types.ClassType] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
d = id(x)
try:
return memo[d]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
memo[d] = y
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.iteritems():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
    to ensure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _deepcopy_inst(x, memo):
if hasattr(x, '__deepcopy__'):
return x.__deepcopy__(memo)
if hasattr(x, '__getinitargs__'):
args = x.__getinitargs__()
args = deepcopy(args, memo)
y = x.__class__(*args)
else:
y = _EmptyClass()
y.__class__ = x.__class__
memo[id(x)] = y
if hasattr(x, '__getstate__'):
state = x.__getstate__()
else:
state = x.__dict__
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
y.__dict__.update(state)
return y
d[types.InstanceType] = _deepcopy_inst
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.iteritems():
setattr(y, key, value)
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
def _test():
l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
{'abc': 'ABC'}, (), [], {}]
l1 = copy(l)
print l1==l
l1 = map(copy, l)
print l1==l
l1 = deepcopy(l)
print l1==l
class C:
def __init__(self, arg=None):
self.a = 1
self.arg = arg
if __name__ == '__main__':
import sys
file = sys.argv[0]
else:
file = __file__
self.fp = open(file)
self.fp.close()
def __getstate__(self):
return {'a': self.a, 'arg': self.arg}
def __setstate__(self, state):
for key, value in state.iteritems():
setattr(self, key, value)
def __deepcopy__(self, memo=None):
new = self.__class__(deepcopy(self.arg, memo))
new.a = self.a
return new
c = C('argument sketch')
l.append(c)
l2 = copy(l)
print l == l2
print l
print l2
l2 = deepcopy(l)
print l == l2
print l
print l2
l.append({l[1]: l, 'xyz': l[2]})
l3 = copy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
l3 = deepcopy(l)
import repr
print map(repr.repr, l)
print map(repr.repr, l1)
print map(repr.repr, l2)
print map(repr.repr, l3)
if __name__ == '__main__':
_test()
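if __name__ == '__main__':
    # Hedged illustration of the shallow/deep contrast described in the
    # module docstring:
    inner = [1, 2]
    outer = [inner]
    print copy(outer)[0] is inner        # True: the inner list is shared
    print deepcopy(outer)[0] is inner    # False: the inner list was copied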

View File

@ -1,200 +0,0 @@
"""Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if type(ob_type) is _ClassType:
raise TypeError("copy_reg is not intended for use with classes")
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
    # The constructor_ob function is a vestige of the old safe-for-unpickling protocol.
# There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
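    if __name__ == "__main__":
        # Hedged sketch: the registration above lets picklers map a complex
        # instance to a (callable, args) reduction pair via dispatch_table.
        print dispatch_table[complex](3 + 4j)   # (<type 'complex'>, (3.0, 4.0))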
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError, "can't pickle %s objects" % base.__name__
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, basestring):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError, "code out of range"
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
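if __name__ == "__main__":
    # Hedged sketch of the registry rules above: code 240 sits in the
    # private-use range, and the module/name pair is purely illustrative.
    add_extension("mymodule", "MyType", 240)
    add_extension("mymodule", "MyType", 240)   # redundant: benign no-op
    remove_extension("mymodule", "MyType", 240)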

View File

@ -1,418 +0,0 @@
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "excel", "excel_tab", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe an Excel dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError, e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
def next(self):
row = self.reader.next()
if self.fieldnames is None:
self.fieldnames = row
row = self.reader.next()
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = self.reader.next()
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
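if __name__ == "__main__":
    # Hedged sketch: each call to next() zips self.fieldnames with the
    # values of the next data row.
    rdr = DictReader(StringIO("a,b\r\n1,2\r\n"))
    print rdr.next()   # {'a': '1', 'b': '2'}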
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
for k in rowdict.keys():
if k not in self.fieldnames:
raise ValueError, "dict contains fields not in fieldnames"
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
doublequote = False
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
        The quote character with the most matches wins; likewise for the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
return ('', None, 0) # (quotechar, delimiter, skipinitialspace)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
return (quotechar, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
        2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
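if __name__ == "__main__":
    # Hedged demo of the two heuristics above on a tiny inline sample:
    sample = "name,age\r\nalice,30\r\nbob,25\r\n"
    sniffer = Sniffer()
    print sniffer.sniff(sample).delimiter   # ','
    print sniffer.has_header(sample)        # True: 'age' fails the int cast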

File diff suppressed because it is too large

View File

@ -1,726 +0,0 @@
# $Id: dbexts.py 6638 2009-08-10 17:05:49Z fwierzbicki $
"""
This script provides platform independence by wrapping Python
Database API 2.0 compatible drivers to allow seamless database
usage across implementations.
In order to use the C version, you need mxODBC and mxDateTime.
In order to use the Java version, you need zxJDBC.
>>> import dbexts
>>> d = dbexts.dbexts() # use the default db
>>> d.isql('select count(*) count from player')
count
-------
13569.0
1 row affected
>>> r = d.raw('select count(*) count from player')
>>> r
([('count', 3, 17, None, 15, 0, 1)], [(13569.0,)])
>>>
The configuration file uses the following format, in a file named dbexts.ini:
[default]
name=mysql
[jdbc]
name=mysql
url=jdbc:mysql://localhost/ziclix
user=
pwd=
driver=org.gjt.mm.mysql.Driver
datahandler=com.ziclix.python.sql.handler.MySQLDataHandler
[jdbc]
name=pg
url=jdbc:postgresql://localhost:5432/ziclix
user=bzimmer
pwd=
driver=org.postgresql.Driver
datahandler=com.ziclix.python.sql.handler.PostgresqlDataHandler
"""
import os, re
from types import StringType
__author__ = "brian zimmer (bzimmer@ziclix.com)"
__version__ = "$Revision: 6638 $"[11:-2]
__OS__ = os.name
choose = lambda bool, a, b: (bool and [a] or [b])[0]
def console(rows, headers=()):
"""Format the results into a list of strings (one for each row):
<header>
<headersep>
<row1>
<row2>
...
    headers may be given as a list of strings.
Columns are separated by colsep; the header is separated from
the result set by a line of headersep characters.
The function calls stringify to format the value data into a string.
    It defaults to calling str() and stripping leading and trailing whitespace.
- copied and modified from mxODBC
"""
# Check row entry lengths
output = []
headers = map(lambda header: header.upper(), list(map(lambda x: x or "", headers)))
collen = map(len,headers)
output.append(headers)
if rows and len(rows) > 0:
for row in rows:
row = map(lambda x: str(x), row)
for i in range(len(row)):
entry = row[i]
if collen[i] < len(entry):
collen[i] = len(entry)
output.append(row)
if len(output) == 1:
affected = "0 rows affected"
elif len(output) == 2:
affected = "1 row affected"
else:
affected = "%d rows affected" % (len(output) - 1)
# Format output
for i in range(len(output)):
row = output[i]
l = []
for j in range(len(row)):
l.append('%-*s' % (collen[j],row[j]))
output[i] = " | ".join(l)
# Insert header separator
totallen = len(output[0])
output[1:1] = ["-"*(totallen/len("-"))]
output.append("\n" + affected)
return output
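if __name__ == "__main__":
    # Hedged sketch of the formatter's output shape (column widths grow to
    # fit the longest value):
    for line in console([(1, 'abc')], headers=('id', 'name')):
        print line
    # ID | NAME
    # ---------
    # 1  | abc
    # 1 row affected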
def html(rows, headers=()):
output = []
output.append('<table class="results">')
output.append('<tr class="headers">')
headers = map(lambda x: '<td class="header">%s</td>' % (x.upper()), list(headers))
map(output.append, headers)
output.append('</tr>')
if rows and len(rows) > 0:
for row in rows:
output.append('<tr class="row">')
row = map(lambda x: '<td class="value">%s</td>' % (x), row)
map(output.append, row)
output.append('</tr>')
output.append('</table>')
return output
comments = lambda x: re.compile("{.*?}", re.S).sub("", x, 0)
class mxODBCProxy:
"""Wraps mxODBC to provide proxy support for zxJDBC's additional parameters."""
def __init__(self, c):
self.c = c
def __getattr__(self, name):
if name == "execute":
return self.execute
elif name == "gettypeinfo":
return self.gettypeinfo
else:
return getattr(self.c, name)
def execute(self, sql, params=None, bindings=None, maxrows=None):
if params:
self.c.execute(sql, params)
else:
self.c.execute(sql)
def gettypeinfo(self, typeid=None):
if typeid:
self.c.gettypeinfo(typeid)
class executor:
"""Handles the insertion of values given dynamic data."""
def __init__(self, table, cols):
self.cols = cols
self.table = table
if self.cols:
self.sql = "insert into %s (%s) values (%s)" % (table, ",".join(self.cols), ",".join(("?",) * len(self.cols)))
else:
self.sql = "insert into %s values (%%s)" % (table)
def execute(self, db, rows, bindings):
assert rows and len(rows) > 0, "must have at least one row"
if self.cols:
sql = self.sql
else:
sql = self.sql % (",".join(("?",) * len(rows[0])))
db.raw(sql, rows, bindings)
def connect(dbname):
return dbexts(dbname)
def lookup(dbname):
return dbexts(jndiname=dbname)
class dbexts:
def __init__(self, dbname=None, cfg=None, formatter=console, autocommit=0, jndiname=None, out=None):
self.verbose = 1
self.results = []
self.headers = []
self.autocommit = autocommit
self.formatter = formatter
self.out = out
self.lastrowid = None
self.updatecount = None
if not jndiname:
if cfg == None:
fn = os.path.join(os.path.split(__file__)[0], "dbexts.ini")
if not os.path.exists(fn):
fn = os.path.join(os.environ['HOME'], ".dbexts")
self.dbs = IniParser(fn)
elif isinstance(cfg, IniParser):
self.dbs = cfg
else:
self.dbs = IniParser(cfg)
if dbname == None: dbname = self.dbs[("default", "name")]
if __OS__ == 'java':
from com.ziclix.python.sql import zxJDBC
database = zxJDBC
if not jndiname:
t = self.dbs[("jdbc", dbname)]
self.dburl, dbuser, dbpwd, jdbcdriver = t['url'], t['user'], t['pwd'], t['driver']
if t.has_key('datahandler'):
self.datahandler = []
for dh in t['datahandler'].split(','):
classname = dh.split(".")[-1]
datahandlerclass = __import__(dh, globals(), locals(), classname)
self.datahandler.append(datahandlerclass)
keys = [x for x in t.keys() if x not in ['url', 'user', 'pwd', 'driver', 'datahandler', 'name']]
props = {}
for a in keys:
props[a] = t[a]
self.db = apply(database.connect, (self.dburl, dbuser, dbpwd, jdbcdriver), props)
else:
self.db = database.lookup(jndiname)
self.db.autocommit = self.autocommit
elif __OS__ == 'nt':
for modname in ["mx.ODBC.Windows", "ODBC.Windows"]:
try:
database = __import__(modname, globals(), locals(), "Windows")
break
except:
continue
else:
raise ImportError("unable to find appropriate mxODBC module")
t = self.dbs[("odbc", dbname)]
self.dburl, dbuser, dbpwd = t['url'], t['user'], t['pwd']
self.db = database.Connect(self.dburl, dbuser, dbpwd, clear_auto_commit=1)
self.dbname = dbname
for a in database.sqltype.keys():
setattr(self, database.sqltype[a], a)
for a in dir(database):
try:
p = getattr(database, a)
if issubclass(p, Exception):
setattr(self, a, p)
except:
continue
del database
def __str__(self):
return self.dburl
def __repr__(self):
return self.dburl
def __getattr__(self, name):
if "cfg" == name:
return self.dbs.cfg
raise AttributeError("'dbexts' object has no attribute '%s'" % (name))
def close(self):
""" close the connection to the database """
self.db.close()
def begin(self, style=None):
""" reset ivars and return a new cursor, possibly binding an auxiliary datahandler """
self.headers, self.results = [], []
if style:
c = self.db.cursor(style)
else:
c = self.db.cursor()
if __OS__ == 'java':
if hasattr(self, 'datahandler'):
for dh in self.datahandler:
c.datahandler = dh(c.datahandler)
else:
c = mxODBCProxy(c)
return c
def commit(self, cursor=None, close=1):
""" commit the cursor and create the result set """
if cursor and cursor.description:
self.headers = cursor.description
self.results = cursor.fetchall()
if hasattr(cursor, "nextset"):
s = cursor.nextset()
while s:
self.results += cursor.fetchall()
s = cursor.nextset()
if hasattr(cursor, "lastrowid"):
self.lastrowid = cursor.lastrowid
if hasattr(cursor, "updatecount"):
self.updatecount = cursor.updatecount
if not self.autocommit or cursor is None:
if not self.db.autocommit:
self.db.commit()
if cursor and close: cursor.close()
def rollback(self):
""" rollback the cursor """
self.db.rollback()
def prepare(self, sql):
""" prepare the sql statement """
cur = self.begin()
try:
return cur.prepare(sql)
finally:
self.commit(cur)
def display(self):
""" using the formatter, display the results """
if self.formatter and self.verbose > 0:
res = self.results
if res:
print >> self.out, ""
for a in self.formatter(res, map(lambda x: x[0], self.headers)):
print >> self.out, a
print >> self.out, ""
def __execute__(self, sql, params=None, bindings=None, maxrows=None):
""" the primary execution method """
cur = self.begin()
try:
if bindings:
cur.execute(sql, params, bindings, maxrows=maxrows)
elif params:
cur.execute(sql, params, maxrows=maxrows)
else:
cur.execute(sql, maxrows=maxrows)
finally:
self.commit(cur, close=isinstance(sql, StringType))
def isql(self, sql, params=None, bindings=None, maxrows=None):
""" execute and display the sql """
self.raw(sql, params, bindings, maxrows=maxrows)
self.display()
def raw(self, sql, params=None, bindings=None, delim=None, comments=comments, maxrows=None):
""" execute the sql and return a tuple of (headers, results) """
if delim:
headers = []
results = []
            if isinstance(sql, StringType):
if comments: sql = comments(sql)
statements = filter(lambda x: len(x) > 0,
map(lambda statement: statement.strip(), sql.split(delim)))
else:
statements = [sql]
for a in statements:
self.__execute__(a, params, bindings, maxrows=maxrows)
headers.append(self.headers)
results.append(self.results)
self.headers = headers
self.results = results
else:
self.__execute__(sql, params, bindings, maxrows=maxrows)
return (self.headers, self.results)
def callproc(self, procname, params=None, bindings=None, maxrows=None):
""" execute a stored procedure """
cur = self.begin()
try:
cur.callproc(procname, params=params, bindings=bindings, maxrows=maxrows)
finally:
self.commit(cur)
self.display()
def pk(self, table, owner=None, schema=None):
""" display the table's primary keys """
cur = self.begin()
cur.primarykeys(schema, owner, table)
self.commit(cur)
self.display()
def fk(self, primary_table=None, foreign_table=None, owner=None, schema=None):
""" display the table's foreign keys """
cur = self.begin()
if primary_table and foreign_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, foreign_table)
elif primary_table:
cur.foreignkeys(schema, owner, primary_table, schema, owner, None)
elif foreign_table:
cur.foreignkeys(schema, owner, None, schema, owner, foreign_table)
self.commit(cur)
self.display()
def table(self, table=None, types=("TABLE",), owner=None, schema=None):
"""If no table argument, displays a list of all tables. If a table argument,
displays the columns of the given table."""
cur = self.begin()
if table:
cur.columns(schema, owner, table, None)
else:
cur.tables(schema, owner, None, types)
self.commit(cur)
self.display()
def proc(self, proc=None, owner=None, schema=None):
"""If no proc argument, displays a list of all procedures. If a proc argument,
displays the parameters of the given procedure."""
cur = self.begin()
if proc:
cur.procedurecolumns(schema, owner, proc, None)
else:
cur.procedures(schema, owner, None)
self.commit(cur)
self.display()
def stat(self, table, qualifier=None, owner=None, unique=0, accuracy=0):
""" display the table's indicies """
cur = self.begin()
cur.statistics(qualifier, owner, table, unique, accuracy)
self.commit(cur)
self.display()
def typeinfo(self, sqltype=None):
""" display the types available for the database """
cur = self.begin()
cur.gettypeinfo(sqltype)
self.commit(cur)
self.display()
def tabletypeinfo(self):
""" display the table types available for the database """
cur = self.begin()
cur.gettabletypeinfo()
self.commit(cur)
self.display()
def schema(self, table, full=0, sort=1, owner=None):
"""Displays a Schema object for the table. If full is true, then generates
references to the table in addition to the standard fields. If sort is true,
sort all the items in the schema, else leave them in db dependent order."""
print >> self.out, str(Schema(self, table, owner, full, sort))
def bulkcopy(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
"""Returns a Bulkcopy object using the given table."""
if type(dst) == type(""):
dst = dbexts(dst, cfg=self.dbs)
bcp = Bulkcopy(dst, table, include=include, exclude=exclude, autobatch=autobatch, executor=executor)
return bcp
def bcp(self, src, table, where='(1=1)', params=[], include=[], exclude=[], autobatch=0, executor=executor):
"""Bulkcopy of rows from a src database to the current database for a given table and where clause."""
if type(src) == type(""):
src = dbexts(src, cfg=self.dbs)
bcp = self.bulkcopy(self, table, include, exclude, autobatch, executor)
num = bcp.transfer(src, where, params)
return num
def unload(self, filename, sql, delimiter=",", includeheaders=1):
""" Unloads the delimited results of the query to the file specified, optionally including headers. """
u = Unload(self, filename, delimiter, includeheaders)
u.unload(sql)
class Bulkcopy:
"""The idea for a bcp class came from http://object-craft.com.au/projects/sybase"""
def __init__(self, dst, table, include=[], exclude=[], autobatch=0, executor=executor):
self.dst = dst
self.table = table
self.total = 0
self.rows = []
self.autobatch = autobatch
self.bindings = {}
include = map(lambda x: x.lower(), include)
exclude = map(lambda x: x.lower(), exclude)
_verbose = self.dst.verbose
self.dst.verbose = 0
try:
self.dst.table(self.table)
if self.dst.results:
colmap = {}
for a in self.dst.results:
colmap[a[3].lower()] = a[4]
cols = self.__filter__(colmap.keys(), include, exclude)
for a in zip(range(len(cols)), cols):
self.bindings[a[0]] = colmap[a[1]]
colmap = None
else:
cols = self.__filter__(include, include, exclude)
finally:
self.dst.verbose = _verbose
self.executor = executor(table, cols)
def __str__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __repr__(self):
return "[%s].[%s]" % (self.dst, self.table)
def __getattr__(self, name):
if name == 'columns':
return self.executor.cols
def __filter__(self, values, include, exclude):
cols = map(lambda col: col.lower(), values)
if exclude:
cols = filter(lambda x, ex=exclude: x not in ex, cols)
if include:
cols = filter(lambda x, inc=include: x in inc, cols)
return cols
def format(self, column, type):
self.bindings[column] = type
def done(self):
if len(self.rows) > 0:
return self.batch()
return 0
def batch(self):
self.executor.execute(self.dst, self.rows, self.bindings)
cnt = len(self.rows)
self.total += cnt
self.rows = []
return cnt
def rowxfer(self, line):
self.rows.append(line)
if self.autobatch: self.batch()
def transfer(self, src, where="(1=1)", params=[]):
sql = "select %s from %s where %s" % (", ".join(self.columns), self.table, where)
h, d = src.raw(sql, params)
if d:
map(self.rowxfer, d)
return self.done()
return 0
class Unload:
"""Unloads a sql statement to a file with optional formatting of each value."""
def __init__(self, db, filename, delimiter=",", includeheaders=1):
self.db = db
self.filename = filename
self.delimiter = delimiter
self.includeheaders = includeheaders
self.formatters = {}
def format(self, o):
if not o:
return ""
o = str(o)
if o.find(",") != -1:
o = "\"\"%s\"\"" % (o)
return o
def unload(self, sql, mode="w"):
headers, results = self.db.raw(sql)
w = open(self.filename, mode)
if self.includeheaders:
w.write("%s\n" % (self.delimiter.join(map(lambda x: x[0], headers))))
if results:
for a in results:
w.write("%s\n" % (self.delimiter.join(map(self.format, a))))
w.flush()
w.close()
class Schema:
"""Produces a Schema object which represents the database schema for a table"""
def __init__(self, db, table, owner=None, full=0, sort=1):
self.db = db
self.table = table
self.owner = owner
self.full = full
self.sort = sort
_verbose = self.db.verbose
self.db.verbose = 0
try:
if table: self.computeschema()
finally:
self.db.verbose = _verbose
def computeschema(self):
self.db.table(self.table, owner=self.owner)
self.columns = []
# (column name, type_name, size, nullable)
if self.db.results:
self.columns = map(lambda x: (x[3], x[5], x[6], x[10]), self.db.results)
if self.sort: self.columns.sort(lambda x, y: cmp(x[0], y[0]))
self.db.fk(None, self.table)
# (pk table name, pk column name, fk column name, fk name, pk name)
self.imported = []
if self.db.results:
self.imported = map(lambda x: (x[2], x[3], x[7], x[11], x[12]), self.db.results)
if self.sort: self.imported.sort(lambda x, y: cmp(x[2], y[2]))
self.exported = []
if self.full:
self.db.fk(self.table, None)
# (pk column name, fk table name, fk column name, fk name, pk name)
if self.db.results:
self.exported = map(lambda x: (x[3], x[6], x[7], x[11], x[12]), self.db.results)
if self.sort: self.exported.sort(lambda x, y: cmp(x[1], y[1]))
self.db.pk(self.table)
self.primarykeys = []
if self.db.results:
# (column name, key_seq, pk name)
self.primarykeys = map(lambda x: (x[3], x[4], x[5]), self.db.results)
if self.sort: self.primarykeys.sort(lambda x, y: cmp(x[1], y[1]))
try:
self.indices = None
self.db.stat(self.table)
self.indices = []
# (non-unique, name, type, pos, column name, asc)
if self.db.results:
idxdict = {}
# mxODBC returns a row of None's, so filter it out
idx = map(lambda x: (x[3], x[5].strip(), x[6], x[7], x[8]), filter(lambda x: x[5], self.db.results))
def cckmp(x, y):
c = cmp(x[1], y[1])
if c == 0: c = cmp(x[3], y[3])
return c
            # sort this regardless; this gets the indices lined up
idx.sort(cckmp)
for a in idx:
if not idxdict.has_key(a[1]):
idxdict[a[1]] = []
idxdict[a[1]].append(a)
self.indices = idxdict.values()
if self.sort: self.indices.sort(lambda x, y: cmp(x[0][1], y[0][1]))
except:
pass
def __str__(self):
d = []
d.append("Table")
d.append(" " + self.table)
d.append("\nPrimary Keys")
for a in self.primarykeys:
d.append(" %s {%s}" % (a[0], a[2]))
d.append("\nImported (Foreign) Keys")
for a in self.imported:
d.append(" %s (%s.%s) {%s}" % (a[2], a[0], a[1], a[3]))
if self.full:
d.append("\nExported (Referenced) Keys")
for a in self.exported:
d.append(" %s (%s.%s) {%s}" % (a[0], a[1], a[2], a[3]))
d.append("\nColumns")
for a in self.columns:
nullable = choose(a[3], "nullable", "non-nullable")
d.append(" %-20s %s(%s), %s" % (a[0], a[1], a[2], nullable))
d.append("\nIndices")
if self.indices is None:
d.append(" (failed)")
else:
for a in self.indices:
unique = choose(a[0][0], "non-unique", "unique")
cname = ", ".join(map(lambda x: x[4], a))
d.append(" %s index {%s} on (%s)" % (unique, a[0][1], cname))
return "\n".join(d)
class IniParser:
def __init__(self, cfg, key='name'):
self.key = key
self.records = {}
self.ctypeRE = re.compile("\[(jdbc|odbc|default)\]")
self.entryRE = re.compile("([a-zA-Z]+)[ \t]*=[ \t]*(.*)")
self.cfg = cfg
self.parse()
def parse(self):
fp = open(self.cfg, "r")
data = fp.readlines()
fp.close()
lines = filter(lambda x: len(x) > 0 and x[0] not in ['#', ';'], map(lambda x: x.strip(), data))
current = None
for i in range(len(lines)):
line = lines[i]
g = self.ctypeRE.match(line)
if g: # a section header
current = {}
if not self.records.has_key(g.group(1)):
self.records[g.group(1)] = []
self.records[g.group(1)].append(current)
else:
g = self.entryRE.match(line)
if g:
current[g.group(1)] = g.group(2)
def __getitem__(self, (ctype, skey)):
if skey == self.key: return self.records[ctype][0][skey]
t = filter(lambda x, p=self.key, s=skey: x[p] == s, self.records[ctype])
if not t or len(t) > 1:
raise KeyError, "invalid key ('%s', '%s')" % (ctype, skey)
return t[0]
def random_table_name(prefix, num_chars):
import random
d = [prefix, '_']
i = 0
while i < num_chars:
d.append(chr(int(100 * random.random()) % 26 + ord('A')))
i += 1
return "".join(d)
class ResultSetRow:
def __init__(self, rs, row):
self.row = row
self.rs = rs
def __getitem__(self, i):
if type(i) == type(""):
i = self.rs.index(i)
return self.row[i]
def __getslice__(self, i, j):
if type(i) == type(""): i = self.rs.index(i)
if type(j) == type(""): j = self.rs.index(j)
return self.row[i:j]
def __len__(self):
return len(self.row)
def __repr__(self):
return str(self.row)
class ResultSet:
def __init__(self, headers, results=[]):
self.headers = map(lambda x: x.upper(), headers)
self.results = results
def index(self, i):
return self.headers.index(i.upper())
def __getitem__(self, i):
return ResultSetRow(self, self.results[i])
def __getslice__(self, i, j):
return map(lambda x, rs=self: ResultSetRow(rs, x), self.results[i:j])
def __repr__(self):
return "<%s instance {cols [%d], rows [%d]} at %s>" % (self.__class__, len(self.headers), len(self.results), id(self))

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,38 +0,0 @@
"""Read and cache directory listings.
The listdir() routine returns a sorted list of the files in a directory,
using a cache to avoid reading the directory more often than necessary.
The annotate() routine appends slashes to directories."""
import os
__all__ = ["listdir", "opendir", "annotate", "reset"]
cache = {}
def reset():
"""Reset the cache completely."""
global cache
cache = {}
def listdir(path):
"""List directory contents, using cache."""
try:
cached_mtime, list = cache[path]
del cache[path]
except KeyError:
cached_mtime, list = -1, []
mtime = os.stat(path).st_mtime
if mtime != cached_mtime:
list = os.listdir(path)
list.sort()
cache[path] = mtime, list
return list
opendir = listdir # XXX backward compatibility
def annotate(head, list):
"""Add '/' suffixes to directories."""
for i in range(len(list)):
if os.path.isdir(os.path.join(head, list[i])):
list[i] = list[i] + '/'
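if __name__ == "__main__":
    # Hedged demo: while the directory's mtime is unchanged, listdir()
    # returns the very same cached list object.
    import tempfile
    d = tempfile.gettempdir()
    print listdir(d) is listdir(d)   # True until the directory changes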

View File

@ -1,223 +0,0 @@
"""Disassembler of Python byte code into mnemonics."""
import sys
import types
from opcode import *
from opcode import __all__ as _opcodes_all
__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all
del _opcodes_all
def dis(x=None):
"""Disassemble classes, methods, functions, or code.
With no argument, disassemble the last traceback.
"""
if x is None:
distb()
return
if type(x) is types.InstanceType:
x = x.__class__
if hasattr(x, 'im_func'):
x = x.im_func
if hasattr(x, 'func_code'):
x = x.func_code
if hasattr(x, '__dict__'):
items = x.__dict__.items()
items.sort()
for name, x1 in items:
if type(x1) in (types.MethodType,
types.FunctionType,
types.CodeType,
types.ClassType):
print "Disassembly of %s:" % name
try:
dis(x1)
except TypeError, msg:
print "Sorry:", msg
print
elif hasattr(x, 'co_code'):
disassemble(x)
elif isinstance(x, str):
disassemble_string(x)
else:
raise TypeError, \
"don't know how to disassemble %s objects" % \
type(x).__name__
def distb(tb=None):
"""Disassemble a traceback (default: last traceback)."""
if tb is None:
try:
tb = sys.last_traceback
except AttributeError:
raise RuntimeError, "no last traceback to disassemble"
while tb.tb_next: tb = tb.tb_next
disassemble(tb.tb_frame.f_code, tb.tb_lasti)
def disassemble(co, lasti=-1):
"""Disassemble a code object."""
code = co.co_code
labels = findlabels(code)
linestarts = dict(findlinestarts(co))
n = len(code)
i = 0
extended_arg = 0
free = None
while i < n:
c = code[i]
op = ord(c)
if i in linestarts:
if i > 0:
print
print "%3d" % linestarts[i],
else:
print ' ',
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print repr(i).rjust(4),
print opname[op].ljust(20),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536L
print repr(oparg).rjust(5),
if op in hasconst:
print '(' + repr(co.co_consts[oparg]) + ')',
elif op in hasname:
print '(' + co.co_names[oparg] + ')',
elif op in hasjrel:
print '(to ' + repr(i + oparg) + ')',
elif op in haslocal:
print '(' + co.co_varnames[oparg] + ')',
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
print '(' + free[oparg] + ')',
print
def disassemble_string(code, lasti=-1, varnames=None, names=None,
constants=None):
labels = findlabels(code)
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
if i == lasti: print '-->',
else: print ' ',
if i in labels: print '>>',
else: print ' ',
print repr(i).rjust(4),
print opname[op].ljust(15),
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
print repr(oparg).rjust(5),
if op in hasconst:
if constants:
print '(' + repr(constants[oparg]) + ')',
else:
print '(%d)'%oparg,
elif op in hasname:
if names is not None:
print '(' + names[oparg] + ')',
else:
print '(%d)'%oparg,
elif op in hasjrel:
print '(to ' + repr(i + oparg) + ')',
elif op in haslocal:
if varnames:
print '(' + varnames[oparg] + ')',
else:
print '(%d)' % oparg,
elif op in hascompare:
print '(' + cmp_op[oparg] + ')',
print
disco = disassemble # XXX For backwards compatibility
def findlabels(code):
"""Detect all offsets in a byte code which are jump targets.
Return the list of offsets.
"""
labels = []
n = len(code)
i = 0
while i < n:
c = code[i]
op = ord(c)
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256
i = i+2
label = -1
if op in hasjrel:
label = i+oparg
elif op in hasjabs:
label = oparg
if label >= 0:
if label not in labels:
labels.append(label)
return labels
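# Quick self-check of findlabels() (hedged; offsets are CPython-2.x
# specific): the jump targets of a simple loop come back as label offsets.
if __name__ == '__main__':
    _co = compile("for i in range(3):\n    pass\n", "<demo>", "exec")
    print findlabels(_co.co_code)   # e.g. [23, 22, 13] -- SETUP_LOOP, FOR_ITER, loop head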
def findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source.
Generate pairs (offset, lineno) as described in Python/compile.c.
"""
byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
line_increments = [ord(c) for c in code.co_lnotab[1::2]]
lastlineno = None
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if lineno != lastlineno:
yield (addr, lineno)
lastlineno = lineno
addr += byte_incr
lineno += line_incr
if lineno != lastlineno:
yield (addr, lineno)
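# Worked example of the lnotab decoding above (hedged; offsets are
# CPython-2.x specific): two statements yield one (offset, lineno) pair each.
if __name__ == '__main__':
    _co = compile("x = 1\ny = 2\n", "<demo>", "exec")
    print list(findlinestarts(_co)) # e.g. [(0, 1), (6, 2)]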
def _test():
"""Simple test program to disassemble a file."""
if sys.argv[1:]:
if sys.argv[2:]:
sys.stderr.write("usage: python dis.py [-|file]\n")
sys.exit(2)
fn = sys.argv[1]
if not fn or fn == "-":
fn = None
else:
fn = None
if fn is None:
f = sys.stdin
else:
f = open(fn)
source = f.read()
if fn is not None:
f.close()
else:
fn = "<stdin>"
code = compile(source, fn, "exec")
dis(code)
if __name__ == "__main__":
_test()

View File

@@ -1,22 +0,0 @@
This directory contains only a subset of the Distutils, specifically
the Python modules in the 'distutils' and 'distutils.command'
packages. This is all you need to distribute and install Python
modules using the Distutils. There is also a separately packaged
standalone version of the Distutils available for people who want to
upgrade the Distutils without upgrading Python, available from the
Distutils web page:
http://www.python.org/sigs/distutils-sig/
The standalone version includes all of the code in this directory,
plus documentation, test scripts, examples, etc.
The Distutils documentation is divided into two documents, "Installing
Python Modules", which explains how to install Python packages, and
"Distributing Python Modules", which explains how to write setup.py
files. Both documents are part of the standard Python documentation
set, and are available from http://www.python.org/doc/current/ .
Greg Ward (gward@python.net)
$Id: README 29650 2002-11-13 13:26:59Z akuchling $

View File

@@ -1,23 +0,0 @@
"""distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: __init__.py 54641 2007-03-31 21:02:43Z marc-andre.lemburg $"
# Distutils version
#
# Please coordinate with Marc-Andre Lemburg <mal@egenix.com> when adding
# new features to distutils that would warrant bumping the version number.
#
# In general, major and minor version should loosely follow the Python
# version number the distutils code was shipped with.
#
__version__ = "2.5.1"

View File

@@ -1,173 +0,0 @@
"""distutils.archive_util
Utility functions for creating archive files (tarballs, zip files,
that sort of thing)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: archive_util.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
from distutils.dir_util import mkpath
from distutils import log
def make_tarball (base_name, base_dir, compress="gzip",
verbose=0, dry_run=0):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'. 'compress' must be "gzip" (the default), "compress",
"bzip2", or None. Both "tar" and the compression utility named by
'compress' must be on the default program search path, so this is
probably Unix-specific. The output tar file will be named 'base_name' +
".tar", possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
# XXX GNU tar 1.13 has a nifty option to add a prefix directory.
# It's pretty new, though, so we certainly can't require it --
# but it would be nice to take advantage of it to skip the
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
compress_ext = { 'gzip': ".gz",
'bzip2': '.bz2',
'compress': ".Z" }
# flags for compression program, each element of list will be an argument
compress_flags = {'gzip': ["-f9"],
'compress': ["-f"],
'bzip2': ['-f9']}
if compress is not None and compress not in compress_ext.keys():
raise ValueError, \
"bad value for 'compress': must be None, 'gzip', or 'compress'"
archive_name = base_name + ".tar"
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
cmd = ["tar", "-cf", archive_name, base_dir]
spawn(cmd, dry_run=dry_run)
if compress:
spawn([compress] + compress_flags[compress] + [archive_name],
dry_run=dry_run)
return archive_name + compress_ext[compress]
else:
return archive_name
# make_tarball ()
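# Hedged usage sketch (Unix-only, per the docstring: needs "tar" and gzip on
# the default PATH, and assumes a local src/ directory to pack):
if __name__ == '__main__':
    print make_tarball('src', 'src')    # compress defaults to "gzip"
    # -> 'src.tar.gz'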
def make_zipfile (base_name, base_dir, verbose=0, dry_run=0):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_name' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
try:
import zipfile
except ImportError:
zipfile = None
zip_filename = base_name + ".zip"
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
# If zipfile module is not available, try spawning an external
# 'zip' command.
if zipfile is None:
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
try:
spawn(["zip", zipoptions, zip_filename, base_dir],
dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise DistutilsExecError, \
("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
else:
log.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
def visit (z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
z.write(path, path)
log.info("adding '%s'" % path)
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
os.path.walk(base_dir, visit, z)
z.close()
return zip_filename
# make_zipfile ()
ARCHIVE_FORMATS = {
'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (make_zipfile, [], "ZIP file")
}
def check_archive_formats (formats):
for format in formats:
if not ARCHIVE_FORMATS.has_key(format):
return format
else:
return None
def make_archive (base_name, format,
root_dir=None, base_dir=None,
verbose=0, dry_run=0):
"""Create an archive file (eg. zip or tar). 'base_name' is the name
of the file to create, minus any format-specific extension; 'format'
is the archive format: one of "zip", "tar", "ztar", or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
"""
save_cwd = os.getcwd()
if root_dir is not None:
log.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = { 'dry_run': dry_run }
try:
format_info = ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError, "unknown archive format '%s'" % format
func = format_info[0]
for (arg,val) in format_info[1]:
kwargs[arg] = val
filename = apply(func, (base_name, base_dir), kwargs)
if root_dir is not None:
log.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
# make_archive ()
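# Sketch of the dispatcher above with a hypothetical build/myproj layout:
# chdir into root_dir, archive base_dir, restore the cwd, return the name.
if __name__ == '__main__':
    print make_archive('myproj-1.0', 'zip', root_dir='build', base_dir='myproj')
    # -> '/abs/cwd/myproj-1.0.zip' (base_name is made absolute before the chdir);
    #    member paths inside the archive are rooted at 'myproj/'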

View File

@@ -1,398 +0,0 @@
"""distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
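# Hedged usage sketch: BCPPCompiler is normally obtained through the
# ccompiler factory rather than instantiated directly, and only does real
# work with the Borland tools (bcc32, ilink32, tlib) on PATH; hello.c is a
# hypothetical input file.
if __name__ == '__main__':
    from distutils.ccompiler import new_compiler
    cc = new_compiler(compiler='bcpp')      # -> a BCPPCompiler instance
    objs = cc.compile(['hello.c'])
    cc.link_executable(objs, 'hello')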

View File

@@ -1,478 +0,0 @@
"""distutils.cmd
Provides the Command class, the base class for the command classes
in the distutils.command package.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cmd.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os, string, re
from types import *
from distutils.errors import *
from distutils import util, dir_util, file_util, archive_util, dep_util
from distutils import log
class Command:
"""Abstract base class for defining command classes, the "worker bees"
of the Distutils. A useful analogy for command classes is to think of
them as subroutines with local variables called "options". The options
are "declared" in 'initialize_options()' and "defined" (given their
final values, aka "finalized") in 'finalize_options()', both of which
must be defined by every command class. The distinction between the
two is necessary because option values might come from the outside
world (command line, config file, ...), and any options dependent on
other options must be computed *after* these outside influences have
been processed -- hence 'finalize_options()'. The "body" of the
subroutine, where it does all its work based on the values of its
options, is the 'run()' method, which must also be implemented by every
command class.
"""
# 'sub_commands' formalizes the notion of a "family" of commands,
# eg. "install" as the parent with sub-commands "install_lib",
# "install_headers", etc. The parent of a family of commands
# defines 'sub_commands' as a class attribute; it's a list of
# (command_name : string, predicate : unbound_method | string | None)
# tuples, where 'predicate' is a method of the parent command that
# determines whether the corresponding command is applicable in the
# current situation. (Eg. "install_headers" is only applicable if
# we have any C header files to install.) If 'predicate' is None,
# that command is always applicable.
#
# 'sub_commands' is usually defined at the *end* of a class, because
# predicates can be unbound methods, so they must already have been
# defined. The canonical example is the "install" command.
sub_commands = []
# -- Creation/initialization methods -------------------------------
def __init__ (self, dist):
"""Create and initialize a new Command object. Most importantly,
invokes the 'initialize_options()' method, which is the real
initializer and depends on the actual command being
instantiated.
"""
# late import because of mutual dependence between these classes
from distutils.dist import Distribution
if not isinstance(dist, Distribution):
raise TypeError, "dist must be a Distribution instance"
if self.__class__ is Command:
raise RuntimeError, "Command is an abstract class"
self.distribution = dist
self.initialize_options()
# Per-command versions of the global flags, so that the user can
# customize Distutils' behaviour command-by-command and let some
# commands fall back on the Distribution's behaviour. None means
# "not defined, check self.distribution's copy", while 0 or 1 mean
# false and true (duh). Note that this means figuring out the real
# value of each flag is a touch complicated -- hence "self._dry_run"
# will be handled by __getattr__, below.
# XXX This needs to be fixed.
self._dry_run = None
# verbose is largely ignored, but needs to be set for
# backwards compatibility (I think)?
self.verbose = dist.verbose
# Some commands define a 'self.force' option to ignore file
# timestamps, but methods defined *here* assume that
# 'self.force' exists for all commands. So define it here
# just to be safe.
self.force = None
# The 'help' flag is just used for command-line parsing, so
# none of that complicated bureaucracy is needed.
self.help = 0
# 'finalized' records whether or not 'finalize_options()' has been
# called. 'finalize_options()' itself should not pay attention to
# this flag: it is the business of 'ensure_finalized()', which
# always calls 'finalize_options()', to respect/update it.
self.finalized = 0
# __init__ ()
# XXX A more explicit way to customize dry_run would be better.
def __getattr__ (self, attr):
if attr == 'dry_run':
myval = getattr(self, "_" + attr)
if myval is None:
return getattr(self.distribution, attr)
else:
return myval
else:
raise AttributeError, attr
def ensure_finalized (self):
if not self.finalized:
self.finalize_options()
self.finalized = 1
# Subclasses must define:
# initialize_options()
# provide default values for all options; may be customized by
# setup script, by options from config file(s), or by command-line
# options
# finalize_options()
# decide on the final values for all options; this is called
# after all possible intervention from the outside world
# (command-line, option file, etc.) has been processed
# run()
# run the command: do whatever it is we're here to do,
# controlled by the command's various option values
def initialize_options (self):
"""Set default values for all the options that this command
supports. Note that these defaults may be overridden by other
commands, by the setup script, by config files, or by the
command-line. Thus, this is not the place to code dependencies
between options; generally, 'initialize_options()' implementations
are just a bunch of "self.foo = None" assignments.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def finalize_options (self):
"""Set final values for all the options that this command supports.
This is always called as late as possible, ie. after any option
assignments from the command-line or from other commands have been
done. Thus, this is the place to code option dependencies: if
'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
long as 'foo' still has the same value it was assigned in
'initialize_options()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def dump_options (self, header=None, indent=""):
from distutils.fancy_getopt import longopt_xlate
if header is None:
header = "command options for '%s':" % self.get_command_name()
print indent + header
indent = indent + " "
for (option, _, _) in self.user_options:
option = string.translate(option, longopt_xlate)
if option[-1] == "=":
option = option[:-1]
value = getattr(self, option)
print indent + "%s = %s" % (option, value)
def run (self):
"""A command's raison d'etre: carry out the action it exists to
perform, controlled by the options initialized in
'initialize_options()', customized by other commands, the setup
script, the command-line, and config files, and finalized in
'finalize_options()'. All terminal output and filesystem
interaction should be done by 'run()'.
This method must be implemented by all command classes.
"""
raise RuntimeError, \
"abstract method -- subclass %s must override" % self.__class__
def announce (self, msg, level=1):
"""If the current verbosity level is of greater than or equal to
'level' print 'msg' to stdout.
"""
log.log(level, msg)
def debug_print (self, msg):
"""Print 'msg' to stdout if the global DEBUG (taken from the
DISTUTILS_DEBUG environment variable) flag is true.
"""
from distutils.debug import DEBUG
if DEBUG:
print msg
sys.stdout.flush()
# -- Option validation methods -------------------------------------
# (these are very handy in writing the 'finalize_options()' method)
#
# NB. the general philosophy here is to ensure that a particular option
# value meets certain type and value constraints. If not, we try to
# force it into conformance (eg. if we expect a list but have a string,
# split the string on comma and/or whitespace). If we can't force the
# option into conformance, raise DistutilsOptionError. Thus, command
# classes need do nothing more than (eg.)
# self.ensure_string_list('foo')
# and they can be guaranteed that thereafter, self.foo will be
# a list of strings.
def _ensure_stringlike (self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif type(val) is not StringType:
raise DistutilsOptionError, \
"'%s' must be a %s (got `%s`)" % (option, what, val)
return val
def ensure_string (self, option, default=None):
"""Ensure that 'option' is a string; if not defined, set it to
'default'.
"""
self._ensure_stringlike(option, "string", default)
def ensure_string_list (self, option):
"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif type(val) is StringType:
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if type(val) is ListType:
types = map(type, val)
ok = (types == [StringType] * len(val))
else:
ok = 0
if not ok:
raise DistutilsOptionError, \
"'%s' must be a list of strings (got %r)" % \
(option, val)
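# Illustration of the splitting rule above (hypothetical option 'libs' on
# some command object cmd):
#     cmd.libs = "foo, bar baz"
#     cmd.ensure_string_list('libs')      # cmd.libs -> ['foo', 'bar', 'baz']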
def _ensure_tested_string (self, option, tester,
what, error_fmt, default=None):
val = self._ensure_stringlike(option, what, default)
if val is not None and not tester(val):
raise DistutilsOptionError, \
("error in '%s' option: " + error_fmt) % (option, val)
def ensure_filename (self, option):
"""Ensure that 'option' is the name of an existing file."""
self._ensure_tested_string(option, os.path.isfile,
"filename",
"'%s' does not exist or is not a file")
def ensure_dirname (self, option):
self._ensure_tested_string(option, os.path.isdir,
"directory name",
"'%s' does not exist or is not a directory")
# -- Convenience methods for commands ------------------------------
def get_command_name (self):
if hasattr(self, 'command_name'):
return self.command_name
else:
return self.__class__.__name__
def set_undefined_options (self, src_cmd, *option_pairs):
"""Set the values of any "undefined" options from corresponding
option values in some other command object. "Undefined" here means
"is None", which is the convention used to indicate that an option
has not been changed between 'initialize_options()' and
'finalize_options()'. Usually called from 'finalize_options()' for
options that depend on some other command rather than another
option of the same command. 'src_cmd' is the other command from
which option values will be taken (a command object will be created
for it if necessary); the remaining arguments are
'(src_option,dst_option)' tuples which mean "take the value of
'src_option' in the 'src_cmd' command object, and copy it to
'dst_option' in the current command object".
"""
# Option_pairs: list of (src_option, dst_option) tuples
src_cmd_obj = self.distribution.get_command_obj(src_cmd)
src_cmd_obj.ensure_finalized()
for (src_option, dst_option) in option_pairs:
if getattr(self, dst_option) is None:
setattr(self, dst_option,
getattr(src_cmd_obj, src_option))
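# Typical call, per the docstring above, from some command's
# finalize_options():
#     self.set_undefined_options('install', ('install_lib', 'install_dir'))
# i.e. adopt install.install_lib as self.install_dir if the latter is still None.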
def get_finalized_command (self, command, create=1):
"""Wrapper around Distribution's 'get_command_obj()' method: find
(create if necessary and 'create' is true) the command object for
'command', call its 'ensure_finalized()' method, and return the
finalized command object.
"""
cmd_obj = self.distribution.get_command_obj(command, create)
cmd_obj.ensure_finalized()
return cmd_obj
# XXX rename to 'get_reinitialized_command()'? (should do the
# same in dist.py, if so)
def reinitialize_command (self, command, reinit_subcommands=0):
return self.distribution.reinitialize_command(
command, reinit_subcommands)
def run_command (self, command):
"""Run some other command: uses the 'run_command()' method of
Distribution, which creates and finalizes the command object if
necessary and then invokes its 'run()' method.
"""
self.distribution.run_command(command)
def get_sub_commands (self):
"""Determine the sub-commands that are relevant in the current
distribution (ie., that need to be run). This is based on the
'sub_commands' class attribute: each tuple in that list may include
a method that we call to determine if the subcommand needs to be
run for the current distribution. Return a list of command names.
"""
commands = []
for (cmd_name, method) in self.sub_commands:
if method is None or method(self):
commands.append(cmd_name)
return commands
# -- External world manipulation -----------------------------------
def warn (self, msg):
sys.stderr.write("warning: %s: %s\n" %
(self.get_command_name(), msg))
def execute (self, func, args, msg=None, level=1):
util.execute(func, args, msg, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
dir_util.mkpath(name, mode, dry_run=self.dry_run)
def copy_file (self, infile, outfile,
preserve_mode=1, preserve_times=1, link=None, level=1):
"""Copy a file respecting verbose, dry-run and force flags. (The
former two default to whatever is in the Distribution object, and
the latter defaults to false for commands that don't define it.)"""
return file_util.copy_file(
infile, outfile,
preserve_mode, preserve_times,
not self.force,
link,
dry_run=self.dry_run)
def copy_tree (self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
"""
return dir_util.copy_tree(
infile, outfile,
preserve_mode,preserve_times,preserve_symlinks,
not self.force,
dry_run=self.dry_run)
def move_file (self, src, dst, level=1):
"""Move a file respectin dry-run flag."""
return file_util.move_file(src, dst, dry_run = self.dry_run)
def spawn (self, cmd, search_path=1, level=1):
"""Spawn an external command respecting dry-run flag."""
from distutils.spawn import spawn
spawn(cmd, search_path, dry_run= self.dry_run)
def make_archive (self, base_name, format,
root_dir=None, base_dir=None):
return archive_util.make_archive(
base_name, format, root_dir, base_dir, dry_run=self.dry_run)
def make_file (self, infiles, outfile, func, args,
exec_msg=None, skip_msg=None, level=1):
"""Special case of 'execute()' for operations that process one or
more input files and generate one output file. Works just like
'execute()', except the operation is skipped and a different
message printed if 'outfile' already exists and is newer than all
files listed in 'infiles'. If the command defined 'self.force',
and it is true, then the command is unconditionally run -- does no
timestamp checks.
"""
if exec_msg is None:
exec_msg = "generating %s from %s" % \
(outfile, string.join(infiles, ', '))
if skip_msg is None:
skip_msg = "skipping %s (inputs unchanged)" % outfile
# Allow 'infiles' to be a single string
if type(infiles) is StringType:
infiles = (infiles,)
elif type(infiles) not in (ListType, TupleType):
raise TypeError, \
"'infiles' must be a string, or a list or tuple of strings"
# If 'outfile' must be regenerated (either because it doesn't
# exist, is out-of-date, or the 'force' flag is true) then
# perform the action that presumably regenerates it
if self.force or dep_util.newer_group (infiles, outfile):
self.execute(func, args, exec_msg, level)
# Otherwise, print the "skip" message
else:
log.debug(skip_msg)
# make_file ()
# class Command
# XXX 'install_misc' class not currently used -- it was the base class for
# both 'install_scripts' and 'install_data', but they outgrew it. It might
# still be useful for 'install_headers', though, so I'm keeping it around
# for the time being.
class install_misc (Command):
"""Common base class for installing some files in a subdirectory.
Currently used by install_data and install_scripts.
"""
user_options = [('install-dir=', 'd', "directory to install the files to")]
def initialize_options (self):
self.install_dir = None
self.outfiles = []
def _install_dir_from (self, dirname):
self.set_undefined_options('install', (dirname, 'install_dir'))
def _copy_files (self, filelist):
self.outfiles = []
if not filelist:
return
self.mkpath(self.install_dir)
for f in filelist:
self.copy_file(f, self.install_dir)
self.outfiles.append(os.path.join(self.install_dir, f))
def get_outputs (self):
return self.outfiles
if __name__ == "__main__":
print "ok"

View File

@@ -1,33 +0,0 @@
"""distutils.command
Package containing implementation of all the standard Distutils
commands."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: __init__.py 37828 2004-11-10 22:23:15Z loewis $"
__all__ = ['build',
'build_py',
'build_ext',
'build_clib',
'build_scripts',
'clean',
'install',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
'sdist',
'register',
'bdist',
'bdist_dumb',
'bdist_rpm',
'bdist_wininst',
# These two are reserved for future use:
#'bdist_sdux',
#'bdist_pkgtool',
# Note:
# bdist_packager is not included because it only provides
# an abstract base class
]

View File

@@ -1,151 +0,0 @@
"""distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist.py 37828 2004-11-10 22:23:15Z loewis $"
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats ():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats=[]
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist (Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = ['skip-build']
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',
#'bdist_sdux', 'bdist_pkgtool'
)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = { 'posix': 'gztar',
'java': 'gztar',
'nt': 'zip',
'os2': 'zip', }
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip',
#'pkgtool', 'sdux'
]
# And the real information.
format_command = { 'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
#'pkgtool': ('bdist_pkgtool',
# "Solaris pkgtool distribution"),
#'sdux': ('bdist_sdux', "HP-UX swinstall depot"),
}
def initialize_options (self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
self.skip_build = 0
# initialize_options()
def finalize_options (self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
self.plat_name = get_platform()
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create built distributions " + \
"on platform %s" % os.name
if self.dist_dir is None:
self.dist_dir = "dist"
# finalize_options()
def run (self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError, "invalid format '%s'" % format
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
# run()
# class bdist
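# Hedged usage sketch: the tables above fan one invocation out to the
# matching sub-commands, e.g.
#     python setup.py bdist --formats=gztar,zip   (runs bdist_dumb twice)
#     python setup.py bdist --help-formats        (triggers show_formats())
if __name__ == '__main__':
    show_formats()      # prints the "List of available distribution formats:" table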

View File

@@ -1,136 +0,0 @@
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_dumb.py 38697 2005-03-23 18:54:36Z loewis $"
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb (Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'java': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
# class bdist_dumb
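# Hedged naming sketch, per run() above: with the posix default format,
#     python setup.py bdist_dumb
# installs into build/bdist.<plat>/dumb and leaves an archive such as
#     dist/myproj-1.0.linux-i686.tar.gz
# whose members mirror the full install layout unless --relative is given.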

View File

@@ -1,639 +0,0 @@
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2005, 2006 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
# The bdist_msi command proper
# based on bdist_wininst
"""
Implements the bdist_msi command.
"""
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree
from distutils.sysconfig import get_python_version
from distutils.version import StrictVersion
from distutils.errors import DistutilsOptionError
from distutils import log
import msilib
from msilib import schema, sequence, text
from msilib import Directory, Feature, Dialog, add_data
class PyDialog(Dialog):
"""Dialog class with a fixed layout: controls at the top, then a ruler,
then a list of buttons: back, next, cancel. Optionally a bitmap at the
left."""
def __init__(self, *args, **kw):
"""Dialog(database, name, x, y, w, h, attributes, title, first,
default, cancel, bitmap=true)"""
Dialog.__init__(self, *args)
ruler = self.h - 36
bmwidth = 152*ruler/328
#if kw.get("bitmap", True):
# self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
self.line("BottomLine", 0, ruler, self.w, 0)
def title(self, title):
"Set the title text of the dialog at the top."
# name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
# text, in VerdanaBold10
self.text("Title", 15, 10, 320, 60, 0x30003,
r"{\VerdanaBold10}%s" % title)
def back(self, title, next, name = "Back", active = 1):
"""Add a back button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
def cancel(self, title, next, name = "Cancel", active = 1):
"""Add a cancel button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
def next(self, title, next, name = "Next", active = 1):
"""Add a Next button with a given title, the tab-next button,
its name in the Control table, possibly initially disabled.
Return the button, so that events can be associated"""
if active:
flags = 3 # Visible|Enabled
else:
flags = 1 # Visible
return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
def xbutton(self, name, title, next, xpos):
"""Add a button with a given title, the tab-next button,
its name in the Control table, giving its x position; the
y-position is aligned with the other buttons.
Return the button, so that events can be associated"""
return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
class bdist_msi (Command):
description = "create a Microsoft Installer (.msi) binary distribution"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'msi')
short_version = get_python_version()
if self.target_version:
if not self.skip_build and self.distribution.has_ext_modules()\
and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip_build'" \
" option must be specified" % (short_version,)
else:
self.target_version = short_version
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.pre_install_script:
raise DistutilsOptionError, "the pre-install-script feature is not yet implemented"
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
self.install_script_key = None
# finalize_options()
def run (self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.prefix = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (get_platform(), target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
self.mkpath(self.dist_dir)
fullname = self.distribution.get_fullname()
installer_name = self.get_installer_filename(fullname)
installer_name = os.path.abspath(installer_name)
if os.path.exists(installer_name): os.unlink(installer_name)
metadata = self.distribution.metadata
author = metadata.author
if not author:
author = metadata.maintainer
if not author:
author = "UNKNOWN"
version = metadata.get_version()
# ProductVersion must be strictly numeric
# XXX need to deal with prerelease versions
sversion = "%d.%d.%d" % StrictVersion(version).version
# Prefix ProductName with Python x.y, so that
# it sorts together with the other Python packages
# in Add/Remove Programs (ARP)
product_name = "Python %s %s" % (self.target_version,
self.distribution.get_fullname())
self.db = msilib.init_database(installer_name, schema,
product_name, msilib.gen_uuid(),
sversion, author)
msilib.add_tables(self.db, sequence)
props = [('DistVersion', version)]
email = metadata.author_email or metadata.maintainer_email
if email:
props.append(("ARPCONTACT", email))
if metadata.url:
props.append(("ARPURLINFOABOUT", metadata.url))
if props:
add_data(self.db, 'Property', props)
self.add_find_python()
self.add_files()
self.add_scripts()
self.add_ui()
self.db.Commit()
if hasattr(self.distribution, 'dist_files'):
self.distribution.dist_files.append(('bdist_msi', self.target_version, fullname))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
def add_files(self):
db = self.db
cab = msilib.CAB("distfiles")
f = Feature(db, "default", "Default Feature", "Everything", 1, directory="TARGETDIR")
f.set_current()
rootdir = os.path.abspath(self.bdist_dir)
root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
db.Commit()
todo = [root]
while todo:
dir = todo.pop()
for file in os.listdir(dir.absolute):
afile = os.path.join(dir.absolute, file)
if os.path.isdir(afile):
newdir = Directory(db, cab, dir, file, file, "%s|%s" % (dir.make_short(file), file))
todo.append(newdir)
else:
key = dir.add_file(file)
if file==self.install_script:
if self.install_script_key:
raise DistutilsOptionError, "Multiple files with name %s" % file
self.install_script_key = '[#%s]' % key
cab.commit(db)
def add_find_python(self):
"""Adds code to the installer to compute the location of Python.
Properties PYTHON.MACHINE, PYTHON.USER, PYTHONDIR and PYTHON will be set
in both the execute and UI sequences; PYTHONDIR will be set from
PYTHON.USER if defined, else from PYTHON.MACHINE.
PYTHON is PYTHONDIR\python.exe"""
install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % self.target_version
add_data(self.db, "RegLocator",
[("python.machine", 2, install_path, None, 2),
("python.user", 1, install_path, None, 2)])
add_data(self.db, "AppSearch",
[("PYTHON.MACHINE", "python.machine"),
("PYTHON.USER", "python.user")])
add_data(self.db, "CustomAction",
[("PythonFromMachine", 51+256, "PYTHONDIR", "[PYTHON.MACHINE]"),
("PythonFromUser", 51+256, "PYTHONDIR", "[PYTHON.USER]"),
("PythonExe", 51+256, "PYTHON", "[PYTHONDIR]\\python.exe"),
("InitialTargetDir", 51+256, "TARGETDIR", "[PYTHONDIR]")])
add_data(self.db, "InstallExecuteSequence",
[("PythonFromMachine", "PYTHON.MACHINE", 401),
("PythonFromUser", "PYTHON.USER", 402),
("PythonExe", None, 403),
("InitialTargetDir", 'TARGETDIR=""', 404),
])
add_data(self.db, "InstallUISequence",
[("PythonFromMachine", "PYTHON.MACHINE", 401),
("PythonFromUser", "PYTHON.USER", 402),
("PythonExe", None, 403),
("InitialTargetDir", 'TARGETDIR=""', 404),
])
def add_scripts(self):
if self.install_script:
add_data(self.db, "CustomAction",
[("install_script", 50, "PYTHON", self.install_script_key)])
add_data(self.db, "InstallExecuteSequence",
[("install_script", "NOT Installed", 6800)])
if self.pre_install_script:
scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
f = open(scriptfn, "w")
# The batch file will be executed with [PYTHON], so that %1
# is the path to the Python interpreter; %0 will be the path
# of the batch file.
# rem ="""
# %1 %0
# exit
# """
# <actual script>
f.write('rem ="""\n%1 %0\nexit\n"""\n')
f.write(open(self.pre_install_script).read())
f.close()
add_data(self.db, "Binary",
[("PreInstall", msilib.Binary(scriptfn))
])
add_data(self.db, "CustomAction",
[("PreInstall", 2, "PreInstall", None)
])
add_data(self.db, "InstallExecuteSequence",
[("PreInstall", "NOT Installed", 450)])
def add_ui(self):
db = self.db
x = y = 50
w = 370
h = 300
title = "[ProductName] Setup"
# see "Dialog Style Bits"
modal = 3 # visible | modal
modeless = 1 # visible
track_disk_space = 32
# UI customization properties
add_data(db, "Property",
# See "DefaultUIFont Property"
[("DefaultUIFont", "DlgFont8"),
# See "ErrorDialog Style Bit"
("ErrorDialog", "ErrorDlg"),
("Progress1", "Install"), # modified in maintenance type dlg
("Progress2", "installs"),
("MaintenanceForm_Action", "Repair"),
# possible values: ALL, JUSTME
("WhichUsers", "ALL")
])
# Fonts, see "TextStyle Table"
add_data(db, "TextStyle",
[("DlgFont8", "Tahoma", 9, None, 0),
("DlgFontBold8", "Tahoma", 8, None, 1), #bold
("VerdanaBold10", "Verdana", 10, None, 1),
("VerdanaRed9", "Verdana", 9, 255, 0),
])
# UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
# Numbers indicate sequence; see sequence.py for how these actions integrate
add_data(db, "InstallUISequence",
[("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
# In the user interface, assume all-users installation if privileged.
("SelectDirectoryDlg", "Not Installed", 1230),
# XXX no support for resume installations yet
#("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
("ProgressDlg", None, 1280)])
add_data(db, 'ActionText', text.ActionText)
add_data(db, 'UIText', text.UIText)
#####################################################################
# Standard dialogs: FatalError, UserExit, ExitDialog
fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
fatal.title("[ProductName] Installer ended prematurely")
fatal.back("< Back", "Finish", active = 0)
fatal.cancel("Cancel", "Back", active = 0)
fatal.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
fatal.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c=fatal.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
user_exit.title("[ProductName] Installer was interrupted")
user_exit.back("< Back", "Finish", active = 0)
user_exit.cancel("Cancel", "Back", active = 0)
user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
"[ProductName] setup was interrupted. Your system has not been modified. "
"To install this program at a later time, please run the installation again.")
user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = user_exit.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Exit")
exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
"Finish", "Finish", "Finish")
exit_dialog.title("Completing the [ProductName] Installer")
exit_dialog.back("< Back", "Finish", active = 0)
exit_dialog.cancel("Cancel", "Back", active = 0)
exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
"Click the Finish button to exit the Installer.")
c = exit_dialog.next("Finish", "Cancel", name="Finish")
c.event("EndDialog", "Return")
#####################################################################
# Required dialog: FilesInUse, ErrorDlg
inuse = PyDialog(db, "FilesInUse",
x, y, w, h,
19, # KeepModeless|Modal|Visible
title,
"Retry", "Retry", "Retry", bitmap=False)
inuse.text("Title", 15, 6, 200, 15, 0x30003,
r"{\DlgFontBold8}Files in Use")
inuse.text("Description", 20, 23, 280, 20, 0x30003,
"Some files that need to be updated are currently in use.")
inuse.text("Text", 20, 55, 330, 50, 3,
"The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
None, None, None)
c=inuse.back("Exit", "Ignore", name="Exit")
c.event("EndDialog", "Exit")
c=inuse.next("Ignore", "Retry", name="Ignore")
c.event("EndDialog", "Ignore")
c=inuse.cancel("Retry", "Exit", name="Retry")
c.event("EndDialog","Retry")
# See "Error Dialog". See "ICE20" for the required names of the controls.
error = Dialog(db, "ErrorDlg",
50, 10, 330, 101,
65543, # Error|Minimize|Modal|Visible
title,
"ErrorText", None, None)
error.text("ErrorText", 50,9,280,48,3, "")
#error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
#####################################################################
# Global "Query Cancel" dialog
cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
"No", "No", "No")
cancel.text("Text", 48, 15, 194, 30, 3,
"Are you sure you want to cancel [ProductName] installation?")
#cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
# "py.ico", None, None)
c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
c.event("EndDialog", "Exit")
c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
c.event("EndDialog", "Return")
#####################################################################
# Global "Wait for costing" dialog
costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
"Return", "Return", "Return")
costing.text("Text", 48, 15, 194, 30, 3,
"Please wait while the installer finishes determining your disk space requirements.")
c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
c.event("EndDialog", "Exit")
#####################################################################
# Preparation dialog: no user input except cancellation
prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel")
prep.text("Description", 15, 70, 320, 40, 0x30003,
"Please wait while the Installer prepares to guide you through the installation.")
prep.title("Welcome to the [ProductName] Installer")
c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
c.mapping("ActionText", "Text")
c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
c.mapping("ActionData", "Text")
prep.back("Back", None, active=0)
prep.next("Next", None, active=0)
c=prep.cancel("Cancel", None)
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Target directory selection
seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
seldlg.title("Select Destination Directory")
version = sys.version[:3]+" "
seldlg.text("Hint", 15, 30, 300, 40, 3,
"The destination directory should contain a Python %sinstallation" % version)
seldlg.back("< Back", None, active=0)
c = seldlg.next("Next >", "Cancel")
c.event("SetTargetPath", "TARGETDIR", ordering=1)
c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=2)
c.event("EndDialog", "Return", ordering=3)
c = seldlg.cancel("Cancel", "DirectoryCombo")
c.event("SpawnDialog", "CancelDlg")
seldlg.control("DirectoryCombo", "DirectoryCombo", 15, 70, 272, 80, 393219,
"TARGETDIR", None, "DirectoryList", None)
seldlg.control("DirectoryList", "DirectoryList", 15, 90, 308, 136, 3, "TARGETDIR",
None, "PathEdit", None)
seldlg.control("PathEdit", "PathEdit", 15, 230, 306, 16, 3, "TARGETDIR", None, "Next", None)
c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
c.event("DirectoryListUp", "0")
c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
c.event("DirectoryListNew", "0")
#####################################################################
# Disk cost
cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
"OK", "OK", "OK", bitmap=False)
cost.text("Title", 15, 6, 200, 15, 0x30003,
"{\DlgFontBold8}Disk Space Requirements")
cost.text("Description", 20, 20, 280, 20, 0x30003,
"The disk space required for the installation of the selected features.")
cost.text("Text", 20, 53, 330, 60, 3,
"The highlighted volumes (if any) do not have enough disk space "
"available for the currently selected features. You can either "
"remove some files from the highlighted volumes, or choose to "
"install less features onto local drive(s), or select different "
"destination drive(s).")
cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
None, "{120}{70}{70}{70}{70}", None, None)
cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
#####################################################################
# WhichUsers Dialog. Only available on NT, and for privileged users.
# This must be run before FindRelatedProducts, because that will
# take into account whether the previous installation was per-user
# or per-machine. We currently don't support going back to this
# dialog after "Next" was selected; to support this, we would need to
# find how to reset the ALLUSERS property, and how to re-run
# FindRelatedProducts.
# On Windows9x, the ALLUSERS property is ignored on the command line
# and in the Property table, but, according to the documentation, the
# installer fails if a dialog attempts to set ALLUSERS.
whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
"AdminInstall", "Next", "Cancel")
whichusers.title("Select whether to install [ProductName] for all users of this computer.")
# A radio group with two options: allusers, justme
g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
"WhichUsers", "", "Next")
g.add("ALL", 0, 5, 150, 20, "Install for all users")
g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
whichusers.back("Back", None, active=0)
c = whichusers.next("Next >", "Cancel")
c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
c.event("EndDialog", "Return", ordering = 2)
c = whichusers.cancel("Cancel", "AdminInstall")
c.event("SpawnDialog", "CancelDlg")
#####################################################################
# Installation Progress dialog (modeless)
progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
"Cancel", "Cancel", "Cancel", bitmap=False)
progress.text("Title", 20, 15, 200, 15, 0x30003,
"{\DlgFontBold8}[Progress1] [ProductName]")
progress.text("Text", 35, 65, 300, 30, 3,
"Please wait while the Installer [Progress2] [ProductName]. "
"This may take several minutes.")
progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
c.mapping("ActionText", "Text")
#c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
#c.mapping("ActionData", "Text")
c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
None, "Progress done", None, None)
c.mapping("SetProgress", "Progress")
progress.back("< Back", "Next", active=False)
progress.next("Next >", "Cancel", active=False)
progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
###################################################################
# Maintenance type: repair/uninstall
maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
"Next", "Next", "Cancel")
maint.title("Welcome to the [ProductName] Setup Wizard")
maint.text("BodyText", 15, 63, 330, 42, 3,
"Select whether you want to repair or remove [ProductName].")
g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
"MaintenanceForm_Action", "", "Next")
#g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
maint.back("< Back", None, active=False)
c=maint.next("Finish", "Cancel")
# Change installation: Change progress dialog to "Change", then ask
# for feature selection
#c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
#c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
# Reinstall: Change progress dialog to "Repair", then invoke reinstall
# Also set list of reinstalled features to "ALL"
c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
# Uninstall: Change progress to "Remove", then invoke uninstall
# Also set list of removed features to "ALL"
c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
# Close dialog when maintenance action scheduled
c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
#c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
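# Illustration: the [Progress1]/[Progress2] rewrites above feed the
# ProgressDlg texts defined earlier. After choosing Repair, the progress
# dialog reads roughly as follows once the properties are substituted
# (with [ProductName] likewise substituted at run time):
#
#   Repairing [ProductName]
#   Please wait while the Installer repairs [ProductName].
#   This may take several minutes.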
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.msi" %
(fullname, self.target_version))
return installer_name
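# A hypothetical subclass override, as the comment above invites
# (class and file-name scheme are illustrative only):
#
#   class bdist_msi_renamed(bdist_msi):
#       def get_installer_filename(self, fullname):
#           # drop the platform tag from the output name
#           return os.path.join(self.dist_dir,
#                               "%s-py%s.msi" % (fullname, self.target_version))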

View File

@ -1,564 +0,0 @@
"""distutils.command.bdist_rpm
Implements the Distutils 'bdist_rpm' command (create RPM source and binary
distributions)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_rpm.py 52742 2006-11-12 18:56:18Z martin.v.loewis $"
import sys, os, string
import glob
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.util import get_platform
from distutils.file_util import write_file
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_rpm (Command):
description = "create an RPM distribution"
user_options = [
('bdist-base=', None,
"base directory for creating built distributions"),
('rpm-base=', None,
"base directory for creating RPMs (defaults to \"rpm\" under "
"--bdist-base; must be specified for RPM 2)"),
('dist-dir=', 'd',
"directory to put final RPM files in "
"(and .spec files if --spec-only)"),
('python=', None,
"path to Python interpreter to hard-code in the .spec file "
"(default: \"python\")"),
('fix-python', None,
"hard-code the exact path to the current Python interpreter in "
"the .spec file"),
('spec-only', None,
"only regenerate spec file"),
('source-only', None,
"only generate source RPM"),
('binary-only', None,
"only generate binary RPM"),
('use-bzip2', None,
"use bzip2 instead of gzip to create source distribution"),
# More meta-data: too RPM-specific to put in the setup script,
# but needed in the .spec file -- so we make these options
# to "bdist_rpm". The idea is that packagers would put this
# info in setup.cfg (see the sketch after this options list),
# although they are of course free to supply it on the command line.
('distribution-name=', None,
"name of the (Linux) distribution to which this "
"RPM applies (*not* the name of the module distribution!)"),
('group=', None,
"package classification [default: \"Development/Libraries\"]"),
('release=', None,
"RPM release number"),
('serial=', None,
"RPM serial number"),
('vendor=', None,
"RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
"[default: maintainer or author from setup script]"),
('packager=', None,
"RPM packager (eg. \"Jane Doe <jane@example.net>\")"
"[default: vendor]"),
('doc-files=', None,
"list of documentation files (space or comma-separated)"),
('changelog=', None,
"RPM changelog"),
('icon=', None,
"name of icon file"),
('provides=', None,
"capabilities provided by this package"),
('requires=', None,
"capabilities required by this package"),
('conflicts=', None,
"capabilities which conflict with this package"),
('build-requires=', None,
"capabilities required to build this package"),
('obsoletes=', None,
"capabilities made obsolete by this package"),
('no-autoreq', None,
"do not automatically calculate dependencies"),
# Actions to take when building RPM
('keep-temp', 'k',
"don't clean up RPM build directory"),
('no-keep-temp', None,
"clean up RPM build directory [default]"),
('use-rpm-opt-flags', None,
"compile with RPM_OPT_FLAGS when building from source RPM"),
('no-rpm-opt-flags', None,
"do not pass any RPM CFLAGS to compiler"),
('rpm3-mode', None,
"RPM 3 compatibility mode (default)"),
('rpm2-mode', None,
"RPM 2 compatibility mode"),
# Add the hooks necessary for specifying custom scripts
('prep-script=', None,
"Specify a script for the PREP phase of RPM building"),
('build-script=', None,
"Specify a script for the BUILD phase of RPM building"),
('pre-install=', None,
"Specify a script for the pre-INSTALL phase of RPM building"),
('install-script=', None,
"Specify a script for the INSTALL phase of RPM building"),
('post-install=', None,
"Specify a script for the post-INSTALL phase of RPM building"),
('pre-uninstall=', None,
"Specify a script for the pre-UNINSTALL phase of RPM building"),
('post-uninstall=', None,
"Specify a script for the post-UNINSTALL phase of RPM building"),
('clean-script=', None,
"Specify a script for the CLEAN phase of RPM building"),
('verify-script=', None,
"Specify a script for the VERIFY phase of the RPM build"),
# Allow a packager to explicitly force an architecture
('force-arch=', None,
"Force an architecture onto the RPM build process"),
]
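# As noted above, packagers typically supply these values via setup.cfg
# rather than the command line; a minimal sketch (values are examples,
# not defaults):
#
#   [bdist_rpm]
#   release = 1
#   group = Development/Libraries
#   vendor = Jane Doe <jane@example.net>
#   doc-files = README.txt CHANGES.txt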
boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
'no-autoreq']
negative_opt = {'no-keep-temp': 'keep-temp',
'no-rpm-opt-flags': 'use-rpm-opt-flags',
'rpm2-mode': 'rpm3-mode'}
def initialize_options (self):
self.bdist_base = None
self.rpm_base = None
self.dist_dir = None
self.python = None
self.fix_python = None
self.spec_only = None
self.binary_only = None
self.source_only = None
self.use_bzip2 = None
self.distribution_name = None
self.group = None
self.release = None
self.serial = None
self.vendor = None
self.packager = None
self.doc_files = None
self.changelog = None
self.icon = None
self.prep_script = None
self.build_script = None
self.install_script = None
self.clean_script = None
self.verify_script = None
self.pre_install = None
self.post_install = None
self.pre_uninstall = None
self.post_uninstall = None
self.prep = None
self.provides = None
self.requires = None
self.conflicts = None
self.build_requires = None
self.obsoletes = None
self.keep_temp = 0
self.use_rpm_opt_flags = 1
self.rpm3_mode = 1
self.no_autoreq = 0
self.force_arch = None
# initialize_options()
def finalize_options (self):
self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
if self.rpm_base is None:
if not self.rpm3_mode:
raise DistutilsOptionError, \
"you must specify --rpm-base in RPM 2 mode"
self.rpm_base = os.path.join(self.bdist_base, "rpm")
if self.python is None:
if self.fix_python:
self.python = sys.executable
else:
self.python = "python"
elif self.fix_python:
raise DistutilsOptionError, \
"--python and --fix-python are mutually exclusive options"
if os.name != 'posix':
raise DistutilsPlatformError, \
("don't know how to create RPM "
"distributions on platform %s" % os.name)
if self.binary_only and self.source_only:
raise DistutilsOptionError, \
"cannot supply both '--source-only' and '--binary-only'"
# don't pass CFLAGS to pure python distributions
if not self.distribution.has_ext_modules():
self.use_rpm_opt_flags = 0
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
self.finalize_package_data()
# finalize_options()
def finalize_package_data (self):
self.ensure_string('group', "Development/Libraries")
self.ensure_string('vendor',
"%s <%s>" % (self.distribution.get_contact(),
self.distribution.get_contact_email()))
self.ensure_string('packager')
self.ensure_string_list('doc_files')
if type(self.doc_files) is ListType:
for readme in ('README', 'README.txt'):
if os.path.exists(readme) and readme not in self.doc_files:
self.doc_files.append(readme)
self.ensure_string('release', "1")
self.ensure_string('serial') # should it be an int?
self.ensure_string('distribution_name')
self.ensure_string('changelog')
# Format changelog correctly
self.changelog = self._format_changelog(self.changelog)
self.ensure_filename('icon')
self.ensure_filename('prep_script')
self.ensure_filename('build_script')
self.ensure_filename('install_script')
self.ensure_filename('clean_script')
self.ensure_filename('verify_script')
self.ensure_filename('pre_install')
self.ensure_filename('post_install')
self.ensure_filename('pre_uninstall')
self.ensure_filename('post_uninstall')
# XXX don't forget we punted on summaries and descriptions -- they
# should be handled here eventually!
# Now *this* is some meta-data that belongs in the setup script...
self.ensure_string_list('provides')
self.ensure_string_list('requires')
self.ensure_string_list('conflicts')
self.ensure_string_list('build_requires')
self.ensure_string_list('obsoletes')
self.ensure_string('force_arch')
# finalize_package_data ()
def run (self):
if DEBUG:
print "before _get_package_data():"
print "vendor =", self.vendor
print "packager =", self.packager
print "doc_files =", self.doc_files
print "changelog =", self.changelog
# make directories
if self.spec_only:
spec_dir = self.dist_dir
self.mkpath(spec_dir)
else:
rpm_dir = {}
for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
rpm_dir[d] = os.path.join(self.rpm_base, d)
self.mkpath(rpm_dir[d])
spec_dir = rpm_dir['SPECS']
# Spec file goes into 'dist_dir' if '--spec-only' is specified,
# build/rpm.<plat> otherwise.
spec_path = os.path.join(spec_dir,
"%s.spec" % self.distribution.get_name())
self.execute(write_file,
(spec_path,
self._make_spec_file()),
"writing '%s'" % spec_path)
if self.spec_only: # stop if requested
return
# Make a source distribution and copy to SOURCES directory with
# optional icon.
saved_dist_files = self.distribution.dist_files[:]
sdist = self.reinitialize_command('sdist')
if self.use_bzip2:
sdist.formats = ['bztar']
else:
sdist.formats = ['gztar']
self.run_command('sdist')
self.distribution.dist_files = saved_dist_files
source = sdist.get_archive_files()[0]
source_dir = rpm_dir['SOURCES']
self.copy_file(source, source_dir)
if self.icon:
if os.path.exists(self.icon):
self.copy_file(self.icon, source_dir)
else:
raise DistutilsFileError, \
"icon file '%s' does not exist" % self.icon
# build package
log.info("building RPMs")
rpm_cmd = ['rpm']
if os.path.exists('/usr/bin/rpmbuild') or \
os.path.exists('/bin/rpmbuild'):
rpm_cmd = ['rpmbuild']
if self.source_only: # what kind of RPMs?
rpm_cmd.append('-bs')
elif self.binary_only:
rpm_cmd.append('-bb')
else:
rpm_cmd.append('-ba')
if self.rpm3_mode:
rpm_cmd.extend(['--define',
'_topdir %s' % os.path.abspath(self.rpm_base)])
if not self.keep_temp:
rpm_cmd.append('--clean')
rpm_cmd.append(spec_path)
# Determine the binary rpm names that should be built out of this spec
# file
# Note that some of these may not be really built (if the file
# list is empty)
nvr_string = "%{name}-%{version}-%{release}"
src_rpm = nvr_string + ".src.rpm"
non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
src_rpm, non_src_rpm, spec_path)
out = os.popen(q_cmd)
binary_rpms = []
source_rpm = None
while 1:
line = out.readline()
if not line:
break
l = string.split(string.strip(line))
assert(len(l) == 2)
binary_rpms.append(l[1])
# The source rpm is named after the first entry in the spec file
if source_rpm is None:
source_rpm = l[0]
status = out.close()
if status:
raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
self.spawn(rpm_cmd)
if not self.dry_run:
if not self.binary_only:
srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
assert(os.path.exists(srpm))
self.move_file(srpm, self.dist_dir)
if not self.source_only:
for rpm in binary_rpms:
rpm = os.path.join(rpm_dir['RPMS'], rpm)
if os.path.exists(rpm):
self.move_file(rpm, self.dist_dir)
# run()
def _dist_path(self, path):
return os.path.join(self.dist_dir, os.path.basename(path))
def _make_spec_file(self):
"""Generate the text of an RPM spec file and return it as a
list of strings (one per line).
"""
# definitions and headers
spec_file = [
'%define name ' + self.distribution.get_name(),
'%define version ' + self.distribution.get_version().replace('-','_'),
'%define unmangled_version ' + self.distribution.get_version(),
'%define release ' + self.release.replace('-','_'),
'',
'Summary: ' + self.distribution.get_description(),
]
# put locale summaries into spec file
# XXX not supported for now (hard to put a dictionary
# in a config file -- arg!)
#for locale in self.summaries.keys():
# spec_file.append('Summary(%s): %s' % (locale,
# self.summaries[locale]))
spec_file.extend([
'Name: %{name}',
'Version: %{version}',
'Release: %{release}',])
# XXX yuck! this filename is available from the "sdist" command,
# but only after it has run: and we create the spec file before
# running "sdist", in case of --spec-only.
if self.use_bzip2:
spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
else:
spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
spec_file.extend([
'License: ' + self.distribution.get_license(),
'Group: ' + self.group,
'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
'Prefix: %{_prefix}', ])
if not self.force_arch:
# noarch if no extension modules
if not self.distribution.has_ext_modules():
spec_file.append('BuildArch: noarch')
else:
spec_file.append( 'BuildArch: %s' % self.force_arch )
for field in ('Vendor',
'Packager',
'Provides',
'Requires',
'Conflicts',
'Obsoletes',
):
val = getattr(self, string.lower(field))
if type(val) is ListType:
spec_file.append('%s: %s' % (field, string.join(val)))
elif val is not None:
spec_file.append('%s: %s' % (field, val))
if self.distribution.get_url() != 'UNKNOWN':
spec_file.append('Url: ' + self.distribution.get_url())
if self.distribution_name:
spec_file.append('Distribution: ' + self.distribution_name)
if self.build_requires:
spec_file.append('BuildRequires: ' +
string.join(self.build_requires))
if self.icon:
spec_file.append('Icon: ' + os.path.basename(self.icon))
if self.no_autoreq:
spec_file.append('AutoReq: 0')
spec_file.extend([
'',
'%description',
self.distribution.get_long_description()
])
# put locale descriptions into spec file
# XXX again, suppressed because config file syntax doesn't
# easily support this ;-(
#for locale in self.descriptions.keys():
# spec_file.extend([
# '',
# '%description -l ' + locale,
# self.descriptions[locale],
# ])
# rpm scripts
# figure out default build script
def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
def_build = "%s build" % def_setup_call
if self.use_rpm_opt_flags:
def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
# insert contents of files
# XXX this is kind of misleading: user-supplied options are files
# that we open and interpolate into the spec file, but the defaults
# are just text that we drop in as-is. Hmmm.
script_options = [
('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
('build', 'build_script', def_build),
('install', 'install_script',
("%s install "
"--root=$RPM_BUILD_ROOT "
"--record=INSTALLED_FILES") % def_setup_call),
('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
('verifyscript', 'verify_script', None),
('pre', 'pre_install', None),
('post', 'post_install', None),
('preun', 'pre_uninstall', None),
('postun', 'post_uninstall', None),
]
for (rpm_opt, attr, default) in script_options:
# Insert the contents of the file referred to; if no file is
# referred to, use 'default' as the contents of the script
val = getattr(self, attr)
if val or default:
spec_file.extend([
'',
'%' + rpm_opt,])
if val:
spec_file.extend(string.split(open(val, 'r').read(), '\n'))
else:
spec_file.append(default)
# files section
spec_file.extend([
'',
'%files -f INSTALLED_FILES',
'%defattr(-,root,root)',
])
if self.doc_files:
spec_file.append('%doc ' + string.join(self.doc_files))
if self.changelog:
spec_file.extend([
'',
'%changelog',])
spec_file.extend(self.changelog)
return spec_file
# _make_spec_file ()
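# For a distribution named "foo" at version 1.0 with default options and
# no extension modules, the spec file produced above would begin roughly
# like this (illustrative values):
#
#   %define name foo
#   %define version 1.0
#   %define unmangled_version 1.0
#   %define release 1
#
#   Summary: An example package
#   Name: %{name}
#   Version: %{version}
#   Release: %{release}
#   Source0: %{name}-%{unmangled_version}.tar.gz
#   License: MIT
#   Group: Development/Libraries
#   BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
#   Prefix: %{_prefix}
#   BuildArch: noarch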
def _format_changelog(self, changelog):
"""Format the changelog correctly and convert it to a list of strings
"""
if not changelog:
return changelog
new_changelog = []
for line in string.split(string.strip(changelog), '\n'):
line = string.strip(line)
if not line:
continue # skip blank lines, which would otherwise crash on line[0]
if line[0] == '*':
new_changelog.extend(['', line])
elif line[0] == '-':
new_changelog.append(line)
else:
new_changelog.append(' ' + line)
# strip trailing newline inserted by first changelog entry
if not new_changelog[0]:
del new_changelog[0]
return new_changelog
# _format_changelog()
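# Worked example for _format_changelog (hypothetical input):
#
#   _format_changelog("* Mon Jan 01 2007 Jane Doe <jane@example.net>\n"
#                     "- fixed a bug\n"
#                     "more detail")
#
# returns:
#
#   ['* Mon Jan 01 2007 Jane Doe <jane@example.net>',
#    '- fixed a bug',
#    '  more detail']
#
# "*" lines get a preceding blank entry (stripped when first), "-" lines
# pass through unchanged, and anything else is indented two spaces.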
# class bdist_rpm

View File

@ -1,328 +0,0 @@
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bdist_wininst.py 38697 2005-03-23 18:54:36Z loewis $"
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst (Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', None,
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
"any files are installed. This script need not be in the "
"distribution"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = 0
self.install_script = None
self.pre_install_script = None
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if not self.skip_build and self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be %s, or the '--skip_build'" \
" option must be specified" % (short_version,)
self.target_version = short_version
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
# finalize_options()
def run (self):
if (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
if self.distribution.has_ext_modules():
# If we are building an installer for a Python version other
# than the one we are currently running, then we need to ensure
# our build_lib reflects the other Python version rather than ours.
# Note that for target_version!=sys.version, we must have skipped the
# build step, so there is no issue with enforcing the build of this
# version.
target_version = self.target_version
if not target_version:
assert self.skip_build, "Should have already checked this"
target_version = sys.version[0:3]
plat_specifier = ".%s-%s" % (get_platform(), target_version)
build = self.get_finalized_command('build')
build.build_lib = os.path.join(build.build_base,
'lib' + plat_specifier)
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = string.upper(key)
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_wininst', pyversion,
self.get_installer_filename(fullname)))
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
def get_inidata (self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
# Escape newline characters
def escape(s):
return string.replace(s, "\n", "\\n")
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(string.capitalize(name), escape(data)))
lines.append("%s=%s" % (name, escape(data)))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % escape(info))
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % escape(title))
import time
import distutils
build_info = "Built %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return string.join(lines, "\n")
# get_inidata()
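# Sketch of the generated metadata block (values invented for
# illustration; fields appear in the iteration order above):
#
#   [metadata]
#   author=Jane Doe
#   name=foo
#   version=1.0
#
#   [Setup]
#   info=<long_description plus the fields above>
#   target_compile=1
#   target_optimize=1
#   title=foo-1.0
#   build_info=Built <date> with distutils-<version>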
def create_exe (self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
installer_name = self.get_installer_filename(fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
# Convert cfgdata from unicode to ascii, mbcs encoded
try:
unicode
except NameError:
pass
else:
if isinstance(cfgdata, unicode):
cfgdata = cfgdata.encode("mbcs")
# Append the pre-install script
cfgdata = cfgdata + "\0"
if self.pre_install_script:
script_data = open(self.pre_install_script, "r").read()
cfgdata = cfgdata + script_data + "\n\0"
else:
# empty pre-install script
cfgdata = cfgdata + "\0"
file.write(cfgdata)
# The 'magic number' 0x1234567B is used to make sure that the
# binary layout of 'cfgdata' is what the wininst.exe binary
# expects. If the layout changes, increment that number, make
# the corresponding changes to the wininst.exe sources, and
# recompile them.
header = struct.pack("<iii",
0x1234567B, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
file.write(header)
file.write(open(arcname, "rb").read())
# create_exe()
def get_installer_filename(self, fullname):
# Factored out to allow overriding in subclasses
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.exe" %
(fullname, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.win32.exe" % fullname)
return installer_name
# get_installer_filename()
def get_exe_bytes (self):
from distutils.msvccompiler import get_build_version
# If a target-version other than the current version has been
# specified, then using the MSVC version from *this* build is no good.
# Without actually finding and executing the target version and parsing
# its sys.version, we just hard-code our knowledge of old versions.
# NOTE: Possible alternative is to allow "--target-version" to
# specify a Python executable rather than a simple version string.
# We can then execute this program to obtain any info we need, such
# as the real sys.version string for the build.
cur_version = get_python_version()
if self.target_version and self.target_version != cur_version:
# If the target version is *later* than us, then we assume they
# use what we use
# string compares seem wrong, but are what sysconfig.py itself uses
if self.target_version > cur_version:
bv = get_build_version()
else:
if self.target_version < "2.4":
bv = "6"
else:
bv = "7.1"
else:
# for current version - use authoritative check.
bv = get_build_version()
# wininst-x.y.exe is in the same directory as this file
directory = os.path.dirname(__file__)
# we must use a wininst-x.y.exe built with the same C compiler
# used for python. XXX What about mingw, borland, and so on?
filename = os.path.join(directory, "wininst-%s.exe" % bv)
return open(filename, "rb").read()
# class bdist_wininst

View File

@ -1,136 +0,0 @@
"""distutils.command.build
Implements the Distutils 'build' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os
from distutils.core import Command
from distutils.util import get_platform
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build (Command):
description = "build everything needed to install"
user_options = [
('build-base=', 'b',
"base directory for build library"),
('build-purelib=', None,
"build directory for platform-neutral distributions"),
('build-platlib=', None,
"build directory for platform-specific distributions"),
('build-lib=', None,
"build directory for all distribution (defaults to either " +
"build-purelib or build-platlib"),
('build-scripts=', None,
"build directory for scripts"),
('build-temp=', 't',
"temporary build directory"),
('compiler=', 'c',
"specify the compiler type"),
('debug', 'g',
"compile extensions and libraries with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('executable=', 'e',
"specify final destination interpreter path (build.py)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_base = 'build'
# these are decided only after 'build_base' has its final value
# (unless overridden by the user or client)
self.build_purelib = None
self.build_platlib = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.compiler = None
self.debug = None
self.force = 0
self.executable = None
def finalize_options (self):
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
# 'build_purelib' and 'build_platlib' just default to 'lib' and
# 'lib.<plat>' under the base build directory. We only use one of
# them for a given distribution, though --
if self.build_purelib is None:
self.build_purelib = os.path.join(self.build_base, 'lib')
if self.build_platlib is None:
self.build_platlib = os.path.join(self.build_base,
'lib' + plat_specifier)
# 'build_lib' is the actual directory that we will use for this
# particular module distribution -- if user didn't supply it, pick
# one of 'build_purelib' or 'build_platlib'.
if self.build_lib is None:
if self.distribution.ext_modules:
self.build_lib = self.build_platlib
else:
self.build_lib = self.build_purelib
# 'build_temp' -- temporary directory for compiler turds,
# "build/temp.<plat>"
if self.build_temp is None:
self.build_temp = os.path.join(self.build_base,
'temp' + plat_specifier)
if self.build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
if self.executable is None:
self.executable = os.path.normpath(sys.executable)
# finalize_options ()
def run (self):
# Run all relevant sub-commands. This will be some subset of:
# - build_py - pure Python modules
# - build_clib - standalone C libraries
# - build_ext - Python extensions
# - build_scripts - (Python) scripts
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# -- Predicates for the sub-command list ---------------------------
def has_pure_modules (self):
return self.distribution.has_pure_modules()
def has_c_libraries (self):
return self.distribution.has_c_libraries()
def has_ext_modules (self):
return self.distribution.has_ext_modules()
def has_scripts (self):
return self.distribution.has_scripts()
sub_commands = [('build_py', has_pure_modules),
('build_clib', has_c_libraries),
('build_ext', has_ext_modules),
('build_scripts', has_scripts),
]
# class build
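# The sub_commands table pairs each command name with a predicate; the
# base Command.get_sub_commands() keeps only the entries whose predicate
# is true for this distribution, so run() amounts to (sketch):
#
#   for cmd_name, predicate in self.sub_commands:
#       if predicate is None or predicate(self):
#           self.run_command(cmd_name)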

View File

@ -1,238 +0,0 @@
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_clib.py 37828 2004-11-10 22:23:15Z loewis $"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib (Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib', 'b',
"directory to build C/C++ libraries to"),
('build-temp', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
# initialize_options()
def finalize_options (self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs,
os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
# finalize_options()
def run (self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
# run()
def check_library_list (self, libraries):
"""Ensure that the list of libraries (presumably provided as a
command option 'libraries') is valid, i.e. it is a list of
2-tuples, where the tuples are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise."""
# Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
# with only names changed to protect the innocent!
if type(libraries) is not ListType:
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
if type(lib) is not TupleType or len(lib) != 2: # 'or': reject non-tuples as well as wrong lengths
raise DistutilsSetupError, \
"each element of 'libraries' must be a 2-tuple"
if type(lib[0]) is not StringType:
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if type(lib[1]) is not DictionaryType:
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
# for lib
# check_library_list ()
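# A libraries specification that passes this validation, using the
# build_info keys consumed by build_libraries() below (names invented):
#
#   libraries = [
#       ("foo", {"sources": ["src/foo.c", "src/helper.c"],
#                "macros": [("PIC", "1")],
#                "include_dirs": ["include"]}),
#   ]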
def get_library_names (self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
# get_library_names ()
def get_source_files (self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if (sources is None or
type(sources) not in (ListType, TupleType) ):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
# get_source_files ()
def build_libraries (self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or type(sources) not in (ListType, TupleType):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
# for libraries
# build_libraries ()
# class build_lib

View File

@ -1,717 +0,0 @@
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_ext.py 54942 2007-04-24 15:27:25Z georg.brandl $"
import sys, os, string, re
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils import log
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext (Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options (self):
self.extensions = None
self.build_lib = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
def finalize_options (self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs, os.pathsep)
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.append(py_include)
if plat_py_include != py_include:
self.include_dirs.append(plat_py_include)
if type(self.libraries) is StringType:
self.libraries = [self.libraries]
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif type(self.library_dirs) is StringType:
self.library_dirs = string.split(self.library_dirs, os.pathsep)
if self.rpath is None:
self.rpath = []
elif type(self.rpath) is StringType:
self.rpath = string.split(self.rpath, os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
self.library_dirs.append(os.path.join(sys.exec_prefix, 'PCBuild'))
# OS/2 (EMX) doesn't support Debug vs Release builds, but has the
# import libraries in its "Config" subdirectory
if os.name == 'os2':
self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
# for extensions under Cygwin and AtheOS, Python's library directory
# must be appended to library_dirs
if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# for extensions under Linux with a shared Python library,
# Python's library directory must be appended to library_dirs
if (sys.platform.startswith('linux') or sys.platform.startswith('gnu')) \
and sysconfig.get_config_var('Py_ENABLE_SHARED'):
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = string.split(self.define, ',')
self.define = map(lambda symbol: (symbol, '1'), defines)
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = string.split(self.undef, ',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# finalize_options ()
def run (self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
# run ()
def check_extensions_list (self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if type(extensions) is not ListType:
raise DistutilsSetupError, \
"'ext_modules' option must be a list of Extension instances"
for i in range(len(extensions)):
ext = extensions[i]
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
            if type(ext) is not TupleType or len(ext) != 2:
                raise DistutilsSetupError, \
                      ("each element of 'ext_modules' option must be an "
                       "Extension instance or 2-tuple")
            (ext_name, build_info) = ext
            log.warn(("old-style (ext_name, build_info) tuple found in "
                      "ext_modules for extension '%s' "
                      "-- please convert to Extension instance" % ext_name))
if not (type(ext_name) is StringType and
extension_name_re.match(ext_name)):
raise DistutilsSetupError, \
("first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if type(build_info) is not DictionaryType:
raise DistutilsSetupError, \
("second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs',
'library_dirs',
'libraries',
'extra_objects',
'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if build_info.has_key('def_file'):
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (type(macro) is TupleType and
1 <= len(macro) <= 2):
raise DistutilsSetupError, \
("'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
# for extensions
# check_extensions_list ()
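# Illustrative sketch (not part of the original source): the two
# 'ext_modules' spellings accepted by check_extensions_list(). The names
# ('spam', 'spam.c', WITH_EGGS) are made up for the example.
#
#   from distutils.extension import Extension
#   new_style = [Extension('spam', ['spam.c'], libraries=['m'])]
#   old_style = [('spam', {'sources': ['spam.c'],
#                          'libraries': ['m'],
#                          'macros': [('WITH_EGGS', '1'), ('NDEBUG',)]})]
#
# After check_extensions_list(old_style), old_style[0] is an Extension
# instance with define_macros == [('WITH_EGGS', '1')] and
# undef_macros == ['NDEBUG'].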
def get_source_files (self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs (self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
# get_outputs ()
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
sources = ext.sources
if sources is None or type(sources) not in (ListType, TupleType):
raise DistutilsSetupError, \
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name
sources = list(sources)
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
# ignore build-lib -- put the compiled extension into
# the source tree along with pure Python modules
modpath = string.split(fullname, '.')
package = string.join(modpath[0:-1], '.')
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_filename,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources (self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
# swig_sources ()
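# Illustrative sketch (not part of the original source): the source-list
# rewrite performed by swig_sources() for a hypothetical extension that
# mixes C sources with a SWIG interface file (no '-c++' option, so the
# generated wrapper gets a '.c' suffix):
#
#   import os
#   sources = ['spammodule.c', 'eggs.i']
#   new_sources = []
#   for source in sources:
#       base, ext = os.path.splitext(source)
#       if ext == '.i':
#           new_sources.append(base + '_wrap' + '.c')
#       else:
#           new_sources.append(source)
#   assert new_sources == ['spammodule.c', 'eggs_wrap.c']
#
# swig itself is then spawned roughly as:
#   swig -python -o eggs_wrap.c eggs.i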
def find_swig (self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
elif os.name == "os2":
# assume swig available in the PATH.
return "swig.exe"
else:
raise DistutilsPlatformError, \
("I don't know how to find (much less run) SWIG "
"on platform '%s'") % os.name
# find_swig ()
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullname (self, ext_name):
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename (self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = string.split(ext_name, '.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return apply(os.path.join, ext_path) + '_d' + so_ext
return apply(os.path.join, ext_path) + so_ext
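# Illustrative sketch (not part of the original source): the dotted-name
# to file-name mapping done by get_ext_filename(), assuming a Unix build
# where get_config_var('SO') is '.so':
#
#   import os, string
#   ext_path = string.split('foo.bar', '.')
#   filename = apply(os.path.join, ext_path) + '.so'
#   assert filename == os.path.join('foo', 'bar') + '.so'   # "foo/bar.so"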
def get_export_symbols (self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "init" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "init" function.
"""
initfunc_name = "init" + string.split(ext.name,'.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
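# Illustrative sketch (not part of the original source): the default
# export symbol derived above. For an extension named 'pkg.sub.spam',
# the Windows DLL must export the C entry point 'initspam':
#
#   import string
#   initfunc_name = "init" + string.split('pkg.sub.spam', '.')[-1]
#   assert initfunc_name == 'initspam'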
def get_libraries (self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows and OS/2, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
if sys.platform == "win32":
from distutils.msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
return ext.libraries
elif sys.platform == "os2emx":
# EMX/GCC requires the python library explicitly, and I
# believe VACPP does as well (though not confirmed) - AIM Apr01
template = "python%d%d"
# debug versions of the main DLL aren't supported, at least
# not at this time - AIM Apr01
#if self.debug:
# template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "cygwin":
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "atheos":
from distutils import sysconfig
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# Get SHLIBS from Makefile
extra = []
for lib in sysconfig.get_config_var('SHLIBS').split():
if lib.startswith('-l'):
extra.append(lib[2:])
else:
extra.append(lib)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
elif sys.platform == 'darwin':
# Don't use the default code below
return ext.libraries
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
return ext.libraries + [pythonlib]
else:
return ext.libraries
# class build_ext
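# Illustrative sketch (not part of the original source): how the
# "pythonNN" library names used throughout get_libraries() fall out of
# sys.hexversion, using a stand-in value for a Python 2.5 interpreter:
hexversion = 0x020500f0
major = hexversion >> 24            # 2
minor = (hexversion >> 16) & 0xff   # 5
assert "python%d%d" % (major, minor) == "python25"     # Windows/OS/2 form
assert "python%d.%d" % (major, minor) == "python2.5"   # Unix/Cygwin form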


@ -1,437 +0,0 @@
"""distutils.command.build_py
Implements the Distutils 'build_py' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_py.py 55747 2007-06-02 18:53:07Z neal.norwitz $"
import sys, string, os
from types import *
from glob import glob
from distutils.core import Command
from distutils.errors import *
from distutils.util import convert_path
from distutils import log
class build_py (Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = 0
self.optimize = 0
self.force = None
def finalize_options (self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if type(self.optimize) is not IntType:
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError, "optimize must be 0, 1, or 2"
def run (self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
# run ()
def get_data_files (self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
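# Illustrative sketch (not part of the original source): the tuple
# produced by get_data_files() for a hypothetical layout with
# packages=['pkg'], package_dir={'pkg': 'lib/pkg'},
# package_data={'pkg': ['templates/*.tmpl']} and build_lib='build/lib'.
# A matching file lib/pkg/templates/page.tmpl is recorded with the
# source directory stripped from its name:
#
#   src_dir = 'lib/pkg'
#   found = 'lib/pkg/templates/page.tmpl'
#   plen = len(src_dir) + 1
#   assert found[plen:] == 'templates/page.tmpl'
#
# yielding ('pkg', 'lib/pkg', 'build/lib/pkg', ['templates/page.tmpl'])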
def find_data_files (self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files])
return files
def build_package_data (self):
"""Copy data files into build directory"""
lastdir = None
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir (self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = string.split(package, '.')
if not self.package_dir:
if path:
return apply(os.path.join, path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir[string.join(path, '.')]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return apply(os.path.join, tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return apply(os.path.join, tail)
else:
return ''
# get_package_dir ()
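# Illustrative sketch (not part of the original source): a simplified
# restatement of the lookup above, for the common "everything under
# src/" layout plus one explicit entry. Package names are made up.
#
#   import os, string
#   package_dir = {'': 'src', 'pkg.ext': 'extensions'}
#   def lookup(package):
#       path = string.split(package, '.')
#       tail = []
#       while path:
#           try:
#               pdir = package_dir[string.join(path, '.')]
#           except KeyError:
#               tail.insert(0, path[-1])
#               del path[-1]
#           else:
#               tail.insert(0, pdir)
#               return apply(os.path.join, tail)
#       pdir = package_dir.get('')
#       if pdir is not None:
#           tail.insert(0, pdir)
#       if tail:
#           return apply(os.path.join, tail)
#       return ''
#   assert lookup('pkg.ext') == 'extensions'
#   assert lookup('pkg.sub') == os.path.join('src', 'pkg', 'sub')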
def check_package (self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError, \
"package directory '%s' does not exist" % package_dir
if not os.path.isdir(package_dir):
raise DistutilsFileError, \
("supposed package directory '%s' exists, " +
"but is not a directory") % package_dir
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
# check_package ()
def check_module (self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return 0
else:
return 1
# check_module ()
def find_package_modules (self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules (self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = string.split(module, '.')
package = string.join(path[0:-1], '.')
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
# find_modules ()
def find_all_modules (self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
# find_all_modules ()
def get_source_files (self):
modules = self.find_all_modules()
filenames = []
for module in modules:
filenames.append(module[-1])
return filenames
def get_module_outfile (self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return apply(os.path.join, outfile_path)
def get_outputs (self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = string.split(package, '.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(filename + "c")
if self.optimize > 0:
outputs.append(filename + "o")
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module (self, module, module_file, package):
if type(package) is StringType:
package = string.split(package, '.')
elif type(package) not in (ListType, TupleType):
raise TypeError, \
"'package' must be a string (dot-separated), list, or tuple"
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules (self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
# build_modules ()
def build_packages (self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
            # 'find_package_modules()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
# build_packages ()
def byte_compile (self, files):
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
# class build_py
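# Illustrative sketch (not part of the original source): the per-file
# effect of byte_compile() under Python 2, shown with py_compile, which
# distutils.util.byte_compile uses in its direct mode. With --compile
# each copied module gains a .pyc next to it; with --optimize=1 or 2 a
# .pyo is written instead (compiled as if by -O/-OO).
import os, py_compile, tempfile
build_dir = tempfile.mkdtemp()
src = os.path.join(build_dir, 'mod.py')
open(src, 'w').write('X = 1\n')
py_compile.compile(src)
assert os.path.exists(src + 'c') or os.path.exists(src + 'o')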


@ -1,158 +0,0 @@
"""distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_scripts.py 59668 2008-01-02 18:59:36Z guido.van.rossum $"
import sys, os, re
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
# check if Python is called on the first line with this expression
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts (Command):
description = "\"build\" scripts (copy and fixup #! line)"
user_options = [
('build-dir=', 'd', "directory to \"build\" (copy) to"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
('executable=', 'e', "specify final destination interpreter path"),
]
boolean_options = ['force']
def initialize_options (self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None
self.outfiles = None
def finalize_options (self):
self.set_undefined_options('build',
('build_scripts', 'build_dir'),
('force', 'force'),
('executable', 'executable'))
self.scripts = self.distribution.scripts
def get_source_files(self):
return self.scripts
def run (self):
if not self.scripts:
return
self.copy_scripts()
def copy_scripts (self):
"""Copy each script listed in 'self.scripts'; if it's marked as a
Python script in the Unix way (first line matches 'first_line_re',
        ie. starts with "#!" and contains "python"), then adjust the first
line to refer to the current Python interpreter as we copy.
"""
self.mkpath(self.build_dir)
outfiles = []
for script in self.scripts:
adjust = 0
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, "r")
except IOError:
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line:
self.warn("%s is an empty file (skipping)" % script)
continue
match = first_line_re.match(first_line)
if match:
adjust = 1
post_interp = match.group(1) or ''
if adjust:
log.info("copying and adjusting %s -> %s", script,
self.build_dir)
if not sysconfig.python_build:
executable = self.executable
else:
executable = os.path.join(
sysconfig.get_config_var("BINDIR"),
"python" + sysconfig.get_config_var("EXE"))
executable = fix_jython_executable(executable, post_interp)
if not self.dry_run:
outf = open(outfile, "w")
outf.write("#!%s%s\n" %
(executable,
post_interp))
outf.writelines(f.readlines())
outf.close()
if f:
f.close()
else:
if f:
f.close()
self.copy_file(script, outfile)
if hasattr(os, 'chmod'):
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 07777
newmode = (oldmode | 0555) & 07777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
# copy_scripts ()
# class build_scripts
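# Illustrative sketch (not part of the original source): what
# first_line_re captures when copy_scripts() rewrites a shebang line.
# Interpreter options after the "python" token are preserved:
import re
demo_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
match = demo_re.match('#!/usr/local/bin/python2.4 -u\n')
post_interp = match.group(1) or ''
assert post_interp == ' -u'
# The copied script then starts with "#!<new interpreter> -u".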
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
    except (IOError, OSError):
return executable
return magic == '#!'
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn("WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable


@ -1,82 +0,0 @@
"""distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: clean.py 38532 2005-03-03 08:12:27Z loewis $"
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean (Command):
description = "clean up temporary files from 'build' command"
user_options = [
('build-base=', 'b',
"base build directory (default: 'build.build-base')"),
('build-lib=', None,
"build directory for all modules (default: 'build.build-lib')"),
('build-temp=', 't',
"temporary build directory (default: 'build.build-temp')"),
('build-scripts=', None,
"build directory for scripts (default: 'build.build-scripts')"),
('bdist-base=', None,
"temporary directory for built distributions"),
('all', 'a',
"remove all build output, not just temporary by-products")
]
boolean_options = ['all']
def initialize_options(self):
self.build_base = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.bdist_base = None
self.all = None
def finalize_options(self):
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('build_scripts', 'build_scripts'),
('build_temp', 'build_temp'))
self.set_undefined_options('bdist',
('bdist_base', 'bdist_base'))
def run(self):
# remove the build/temp.<plat> directory (unless it's already
# gone)
if os.path.exists(self.build_temp):
remove_tree(self.build_temp, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
self.build_temp)
if self.all:
# remove build directories
for directory in (self.build_lib,
self.bdist_base,
self.build_scripts):
if os.path.exists(directory):
remove_tree(directory, dry_run=self.dry_run)
else:
log.warn("'%s' does not exist -- can't clean it",
directory)
# just for the heck of it, try to remove the base build directory:
# we might have emptied it right now, but if not we don't care
if not self.dry_run:
try:
os.rmdir(self.build_base)
log.info("removing '%s'", self.build_base)
except OSError:
pass
# class clean


@ -1,45 +0,0 @@
"""distutils.command.x
Implements the Distutils 'x' command.
"""
# created 2000/mm/dd, John Doe
__revision__ = "$Id$"
from distutils.core import Command
class x (Command):
# Brief (40-50 characters) description of the command
description = ""
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [('', '',
""),
]
def initialize_options (self):
self. = None
self. = None
self. = None
# initialize_options()
def finalize_options (self):
if self.x is None:
self.x =
# finalize_options()
def run (self):
# run()
# class x


@ -1,368 +0,0 @@
"""distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: config.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os, string, re
from types import *
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {'c': '.c',
'c++': '.cxx'}
class config (Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options (self):
self.compiler = None
self.cc = None
self.include_dirs = None
#self.define = None
#self.undef = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options (self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif type(self.include_dirs) is StringType:
self.include_dirs = string.split(self.include_dirs, os.pathsep)
if self.libraries is None:
self.libraries = []
elif type(self.libraries) is StringType:
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif type(self.library_dirs) is StringType:
self.library_dirs = string.split(self.library_dirs, os.pathsep)
def run (self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler (self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile (self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
file = open(filename, "w")
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
file.close()
return filename
def _preprocess (self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile (self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link (self, body,
headers, include_dirs,
libraries, library_dirs, lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean (self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", string.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = 1
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = 0
self._clean()
return ok
def search_cpp (self, pattern, body=None,
headers=None, include_dirs=None, lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
(src, out) = self._preprocess(body, headers, include_dirs, lang)
if type(pattern) is StringType:
pattern = re.compile(pattern)
file = open(out)
match = 0
while 1:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = 1
break
file.close()
self._clean()
return match
def try_compile (self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = 1
except CompileError:
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link (self, body,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = 1
except (CompileError, LinkError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run (self, body,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = 1
except (CompileError, LinkError, DistutilsExecError):
ok = 0
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func (self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = string.join(body, "\n") + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
# check_func ()
def check_lib (self, library, library_dirs=None,
headers=None, include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }",
headers, include_dirs,
[library]+other_libraries, library_dirs)
def check_header (self, header, include_dirs=None,
library_dirs=None, lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
# class config
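# Illustrative sketch (not part of the original source): how a setup
# script might subclass the empty 'config' command above. The zlib and
# gettimeofday checks are made up; the check_* helpers are the ones
# defined in this class.
from distutils.command.config import config

class my_config(config):
    def run(self):
        if not self.check_header('zlib.h'):
            self.warn("zlib.h not found -- compression support disabled")
        if not self.check_lib('z'):
            self.warn("cannot link against libz")
        if not self.check_func('gettimeofday',
                               headers=['sys/time.h'], call=1):
            self.warn("no gettimeofday(); falling back to time()")
# Hooked up via setup(..., cmdclass={'config': my_config}) and run
# with `python setup.py config`.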
def dump_file (filename, head=None):
if head is None:
print filename + ":"
else:
print head
file = open(filename)
sys.stdout.write(file.read())
file.close()


@ -1,614 +0,0 @@
"""distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install.py 43363 2006-03-27 21:55:21Z phillip.eby $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.errors import DistutilsOptionError
from glob import glob
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
'mac': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'java': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
}
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
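# Illustrative sketch (not part of the original source): how the '$'
# placeholders in INSTALL_SCHEMES are expanded. finalize_options()
# builds config_vars and runs each directory through subst_vars(); the
# values below are stand-ins for a Unix prefix install of Python 2.5:
from distutils.util import subst_vars
demo_vars = {'base': '/usr/local',
             'platbase': '/usr/local',
             'py_version_short': '2.5',
             'dist_name': 'Spam'}
purelib = subst_vars('$base/lib/python$py_version_short/site-packages',
                     demo_vars)
assert purelib == '/usr/local/lib/python2.5/site-packages'
headers = subst_vars('$base/include/python$py_version_short/$dist_name',
                     demo_vars)
assert headers == '/usr/local/include/python2.5/Spam'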
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
        # distribution are determined. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if self.negative_opt.has_key(opt_name):
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
def finalize_other (self): # Windows and Mac OS for now
if self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
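# Illustrative sketch (not part of the original file; names hypothetical):
# the two accepted 'extra_path' spellings, as parsed above --
#
#   extra_path = "Numeric"            -> path_file = extra_dirs = "Numeric"
#   extra_path = "Numeric,Numeric/x"  -> path_file = "Numeric",
#                                        extra_dirs = "Numeric/x"
#
# Modules then land in <install_lib>/<extra_dirs>, and create_path_file()
# (below) writes "Numeric.pth" containing the extra_dirs line so that
# directory stays importable.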
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self: True),
]
# class install
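# A minimal end-to-end sketch (hypothetical project, not from this file):
# given a setup script
#
#   from distutils.core import setup
#   setup(name="example", version="1.0", py_modules=["example"])
#
# running
#
#   python setup.py install --home=$HOME/py --record=installed.txt
#
# selects the "unix_home" scheme in finalize_unix(), installs the pure
# module under install_purelib, and makes run() write the list of
# installed files to installed.txt.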

View File

@ -1,85 +0,0 @@
"""distutils.command.install_data
Implements the Distutils 'install_data' command, for installing
platform-independent data files."""
# contributed by Bastian Kleineidam
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_data.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from types import StringType
from distutils.core import Command
from distutils.util import change_root, convert_path
class install_data (Command):
description = "install data files"
user_options = [
('install-dir=', 'd',
"base directory for installing data files "
"(default: installation base dir)"),
('root=', None,
"install everything relative to this alternate root directory"),
('force', 'f', "force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options (self):
self.install_dir = None
self.outfiles = []
self.root = None
self.force = 0
self.data_files = self.distribution.data_files
self.warn_dir = 1
def finalize_options (self):
self.set_undefined_options('install',
('install_data', 'install_dir'),
('root', 'root'),
('force', 'force'),
)
def run (self):
self.mkpath(self.install_dir)
for f in self.data_files:
if type(f) is StringType:
# it's a simple file, so copy it
f = convert_path(f)
if self.warn_dir:
self.warn("setup script did not provide a directory for "
"'%s' -- installing right in '%s'" %
(f, self.install_dir))
(out, _) = self.copy_file(f, self.install_dir)
self.outfiles.append(out)
else:
# it's a tuple with path to install to and a list of files
dir = convert_path(f[0])
if not os.path.isabs(dir):
dir = os.path.join(self.install_dir, dir)
elif self.root:
dir = change_root(self.root, dir)
self.mkpath(dir)
if f[1] == []:
# If there are no files listed, the user must be
# trying to create an empty directory, so add the
# directory to the list of output files.
self.outfiles.append(dir)
else:
# Copy files, adding them to the list of output files.
for data in f[1]:
data = convert_path(data)
(out, _) = self.copy_file(data, dir)
self.outfiles.append(out)
def get_inputs (self):
return self.data_files or []
def get_outputs (self):
return self.outfiles
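# A sketch of the two 'data_files' forms handled by run() above
# (hypothetical setup script, not part of this file):
#
#   from distutils.core import setup
#   setup(name="example", version="1.0",
#         data_files=["config.ini",             # bare name: copied into
#                                               # install_dir itself (warned)
#                     ("share/example",         # (dir, [files]) tuple
#                      ["data/a.dat", "data/b.dat"])])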

View File

@ -1,78 +0,0 @@
"""distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib', ('install_dir', 'install_dir'))
basename = "%s-%s-py%s.egg-info" % (
to_filename(safe_name(self.distribution.get_name())),
to_filename(safe_version(self.distribution.get_version())),
sys.version[:3]
)
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink,(self.target,),"Removing "+target)
elif not os.path.isdir(self.install_dir):
self.execute(os.makedirs, (self.install_dir,),
"Creating "+self.install_dir)
log.info("Writing %s", target)
if not self.dry_run:
f = open(target, 'w')
self.distribution.metadata.write_pkg_file(f)
f.close()
def get_outputs(self):
return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
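# A quick demonstration of the helpers above (illustrative only; the
# name and version are hypothetical, and the expected values follow
# directly from the regexes):
if __name__ == "__main__":
    assert safe_name("My Project!!") == "My-Project-"
    assert safe_version("1.0 beta 2") == "1.0.beta.2"
    assert to_filename("my-proj") == "my_proj"
    # e.g. the .egg-info basename built in finalize_options() under 2.7:
    print "%s-%s-py%s.egg-info" % (
        to_filename(safe_name("my-proj")),
        to_filename(safe_version("1.0 beta 2")),
        "2.7")          # -> my_proj-1.0.beta.2-py2.7.egg-info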

View File

@ -1,53 +0,0 @@
"""distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_headers.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.core import Command
class install_headers (Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options (self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run (self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs (self):
return self.distribution.headers or []
def get_outputs (self):
return self.outfiles
# class install_headers
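# A sketch of a setup script exercising this command (hypothetical
# project and header file, not part of this file):
#
#   from distutils.core import setup
#   setup(name="example", version="1.0",
#         headers=["include/example.h"])
#
# "python setup.py install_headers" then copies example.h into the
# configured include directory.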

View File

@ -1,223 +0,0 @@
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_lib.py 37946 2004-12-02 20:14:16Z lemburg $"
import sys, os, string
from types import IntType
from distutils.core import Command
from distutils.errors import DistutilsOptionError
# Extension for Python source files.
if hasattr(os, 'extsep'):
PYTHON_SOURCE_EXTENSION = os.extsep + "py"
else:
PYTHON_SOURCE_EXTENSION = ".py"
class install_lib (Command):
description = "install all Python modules (extensions and pure Python)"
# The byte-compilation options are a tad confusing. Here are the
# possible scenarios:
# 1) no compilation at all (--no-compile --no-optimize)
# 2) compile .pyc only (--compile --no-optimize; default)
# 3) compile .pyc and "level 1" .pyo (--compile --optimize)
# 4) compile "level 1" .pyo only (--no-compile --optimize)
# 5) compile .pyc and "level 2" .pyo (--compile --optimize-more)
# 6) compile "level 2" .pyo only (--no-compile --optimize-more)
#
# The UI for this is two options, 'compile' and 'optimize'.
# 'compile' is strictly boolean, and only decides whether to
# generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
# decides both whether to generate .pyo files and what level of
# optimization to use.
user_options = [
('install-dir=', 'd', "directory to install to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'compile', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# let the 'install' command dictate our installation directory
self.install_dir = None
self.build_dir = None
self.force = 0
self.compile = None
self.optimize = None
self.skip_build = None
def finalize_options (self):
# Get all the information we need to install pure Python modules
# from the umbrella 'install' command -- build (source) directory,
# install (target) directory, and whether to compile .py files.
self.set_undefined_options('install',
('build_lib', 'build_dir'),
('install_lib', 'install_dir'),
('force', 'force'),
('compile', 'compile'),
('optimize', 'optimize'),
('skip_build', 'skip_build'),
)
if self.compile is None:
self.compile = 1
if self.optimize is None:
self.optimize = 0
if type(self.optimize) is not IntType:
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError, "optimize must be 0, 1, or 2"
def run (self):
# Make sure we have built everything we need first
self.build()
# Install everything: simply dump the entire contents of the build
# directory to the installation directory (that's the beauty of
# having a build directory!)
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
# run ()
# -- Top-level worker functions ------------------------------------
# (called from 'run()')
def build (self):
if not self.skip_build:
if self.distribution.has_pure_modules():
self.run_command('build_py')
if self.distribution.has_ext_modules():
self.run_command('build_ext')
def install (self):
if os.path.isdir(self.build_dir):
outfiles = self.copy_tree(self.build_dir, self.install_dir)
else:
self.warn("'%s' does not exist -- no Python modules to install" %
self.build_dir)
return
return outfiles
def byte_compile (self, files):
from distutils.util import byte_compile
# Get the "--root" directory supplied to the "install" command,
# and use it as a prefix to strip off the purported filename
# encoded in bytecode files. This is far from complete, but it
# should at least generate usable bytecode in RPM distributions.
install_root = self.get_finalized_command('install').root
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=install_root,
dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=install_root,
verbose=self.verbose, dry_run=self.dry_run)
# -- Utility methods -----------------------------------------------
def _mutate_outputs (self, has_any, build_cmd, cmd_option, output_dir):
if not has_any:
return []
build_cmd = self.get_finalized_command(build_cmd)
build_files = build_cmd.get_outputs()
build_dir = getattr(build_cmd, cmd_option)
prefix_len = len(build_dir) + len(os.sep)
outputs = []
for file in build_files:
outputs.append(os.path.join(output_dir, file[prefix_len:]))
return outputs
# _mutate_outputs ()
def _bytecode_filenames (self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
# Since build_py handles package data installation, the
# list of outputs can contain more than just .py files.
# Make sure we only report bytecode for the .py files.
ext = os.path.splitext(os.path.normcase(py_file))[1]
if ext != PYTHON_SOURCE_EXTENSION:
continue
if self.compile:
bytecode_files.append(py_file + "c")
if self.optimize > 0:
bytecode_files.append(py_file + "o")
return bytecode_files
# -- External interface --------------------------------------------
# (called by outsiders)
def get_outputs (self):
"""Return the list of files that would be installed if this command
were actually run. Not affected by the "dry-run" flag or whether
modules have actually been built yet.
"""
pure_outputs = \
self._mutate_outputs(self.distribution.has_pure_modules(),
'build_py', 'build_lib',
self.install_dir)
if self.compile:
bytecode_outputs = self._bytecode_filenames(pure_outputs)
else:
bytecode_outputs = []
ext_outputs = \
self._mutate_outputs(self.distribution.has_ext_modules(),
'build_ext', 'build_lib',
self.install_dir)
return pure_outputs + bytecode_outputs + ext_outputs
# get_outputs ()
def get_inputs (self):
"""Get the list of files that are input to this command, ie. the
files that get installed as they are named in the build tree.
The files in this list correspond one-to-one to the output
filenames returned by 'get_outputs()'.
"""
inputs = []
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
inputs.extend(build_py.get_outputs())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
inputs.extend(build_ext.get_outputs())
return inputs
# class install_lib
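# A sketch of how 'compile' and 'optimize' map onto byte_compile() calls
# (scenarios 2 and 5 in the comment near the top of this class). The
# file name is hypothetical; force=1 skips the source-timestamp check,
# and dry_run=1 keeps anything from actually being written.
if __name__ == "__main__":
    from distutils.util import byte_compile
    files = ["build/lib/example" + PYTHON_SOURCE_EXTENSION]
    byte_compile(files, optimize=0, force=1, dry_run=1)   # .pyc only
    byte_compile(files, optimize=2, force=1, dry_run=1)   # plus level-2 .pyo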

View File

@ -1,66 +0,0 @@
"""distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Python scripts."""
# contributed by Bastian Kleineidam
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_scripts.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.build_dir = None
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run (self):
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if hasattr(os, 'chmod'):
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs (self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
# class install_scripts
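# A sketch of the mode arithmetic used in run() above: OR in read and
# execute bits for owner, group, and world, then mask to the permission
# range (0644 is a hypothetical starting mode).
if __name__ == "__main__":
    mode = (0644 | 0555) & 07777
    print oct(mode)                  # -> 0755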

View File

@ -1,294 +0,0 @@
"""distutils.command.register
Implements the Distutils 'register' command (register with the repository).
"""
# created 2002/10/21, Richard Jones
__revision__ = "$Id: register.py 56542 2007-07-25 16:24:08Z martin.v.loewis $"
import sys, os, string, urllib2, getpass, urlparse
import StringIO, ConfigParser
from distutils.core import Command
from distutils.errors import *
class register(Command):
description = ("register the distribution with the Python package index")
DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
user_options = [
('repository=', 'r',
"url of repository [default: %s]"%DEFAULT_REPOSITORY),
('list-classifiers', None,
'list the valid Trove classifiers'),
('show-response', None,
'display full response text from server'),
]
boolean_options = ['verify', 'show-response', 'list-classifiers']  # note: 'verify' has no matching entry in user_options above
def initialize_options(self):
self.repository = None
self.show_response = 0
self.list_classifiers = 0
def finalize_options(self):
if self.repository is None:
self.repository = self.DEFAULT_REPOSITORY
def run(self):
self.check_metadata()
if self.dry_run:
self.verify_metadata()
elif self.list_classifiers:
self.classifiers()
else:
self.send_metadata()
def check_metadata(self):
"""Ensure that all required elements of meta-data (name, version,
URL, (author and author_email) or (maintainer and
maintainer_email)) are supplied by the Distribution object; warn if
any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: " +
string.join(missing, ", "))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
def classifiers(self):
''' Fetch the list of classifiers from the server.
'''
response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
print response.read()
def verify_metadata(self):
''' Send the metadata to the package index server to be checked.
'''
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
print 'Server response (%s): %s'%(code, result)
def send_metadata(self):
''' Send the metadata to the package index server.
Well, do the following:
1. figure who the user is, and then
2. send the data as a Basic auth'ed POST.
First we try to read the username/password from $HOME/.pypirc,
which is a ConfigParser-formatted file with a section
[server-login] containing username and password entries (both
in clear text). E.g.:
[server-login]
username: fred
password: sekrit
Otherwise, to figure who the user is, we offer the user three
choices:
1. use existing login,
2. register as a new user, or
3. set the password to a random string and email the user.
'''
choice = 'x'
username = password = ''
# see if we can short-cut and get the username/password from the
# config
config = None
if os.environ.has_key('HOME'):
rc = os.path.join(os.environ['HOME'], '.pypirc')
if os.path.exists(rc):
print 'Using PyPI login from %s'%rc
config = ConfigParser.ConfigParser()
config.read(rc)
username = config.get('server-login', 'username')
password = config.get('server-login', 'password')
choice = '1'
# get the user's login info
choices = '1 2 3 4'.split()
while choice not in choices:
print '''We need to know who you are, so please choose either:
1. use your existing login,
2. register as a new user,
3. have the server generate a new password for you (and email it to you), or
4. quit
Your selection [default 1]: ''',
choice = raw_input()
if not choice:
choice = '1'
elif choice not in choices:
print 'Please choose one of the four options!'
if choice == '1':
# get the username and password
while not username:
username = raw_input('Username: ')
while not password:
password = getpass.getpass('Password: ')
# set up the authentication
auth = urllib2.HTTPPasswordMgr()
host = urlparse.urlparse(self.repository)[1]
auth.add_password('pypi', host, username, password)
# send the info to the server and report the result
code, result = self.post_to_server(self.build_post_data('submit'),
auth)
print 'Server response (%s): %s'%(code, result)
# possibly save the login
if os.environ.has_key('HOME') and config is None and code == 200:
rc = os.path.join(os.environ['HOME'], '.pypirc')
print 'I can store your PyPI login so future submissions will be faster.'
print '(the login will be stored in %s)'%rc
choice = 'X'
while choice.lower() not in 'yn':
choice = raw_input('Save your login (y/N)?')
if not choice:
choice = 'n'
if choice.lower() == 'y':
f = open(rc, 'w')
f.write('[server-login]\nusername:%s\npassword:%s\n'%(
username, password))
f.close()
try:
os.chmod(rc, 0600)
except:
pass
elif choice == '2':
data = {':action': 'user'}
data['name'] = data['password'] = data['email'] = ''
data['confirm'] = None
while not data['name']:
data['name'] = raw_input('Username: ')
while data['password'] != data['confirm']:
while not data['password']:
data['password'] = getpass.getpass('Password: ')
while not data['confirm']:
data['confirm'] = getpass.getpass(' Confirm: ')
if data['password'] != data['confirm']:
data['password'] = ''
data['confirm'] = None
print "Password and confirm don't match!"
while not data['email']:
data['email'] = raw_input(' EMail: ')
code, result = self.post_to_server(data)
if code != 200:
print 'Server response (%s): %s'%(code, result)
else:
print 'You will receive an email shortly.'
print 'Follow the instructions in it to complete registration.'
elif choice == '3':
data = {':action': 'password_reset'}
data['email'] = ''
while not data['email']:
data['email'] = raw_input('Your email address: ')
code, result = self.post_to_server(data)
print 'Server response (%s): %s'%(code, result)
def build_post_data(self, action):
# figure the data to send - the metadata plus some additional
# information used by the package server
meta = self.distribution.metadata
data = {
':action': action,
'metadata_version' : '1.0',
'name': meta.get_name(),
'version': meta.get_version(),
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
if data['provides'] or data['requires'] or data['obsoletes']:
data['metadata_version'] = '1.1'
return data
def post_to_server(self, data, auth=None):
''' Post a query to the server, and return a string response.
'''
# Build up the MIME payload for the urllib2 POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if type(value) not in (type([]), type( () )):
value = [value]
for value in value:
value = unicode(value).encode("utf-8")
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
# build the Request
headers = {
'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
'Content-length': str(len(body))
}
req = urllib2.Request(self.repository, body, headers)
# handle HTTP and include the Basic Auth handler
opener = urllib2.build_opener(
urllib2.HTTPBasicAuthHandler(password_mgr=auth)
)
data = ''
try:
result = opener.open(req)
except urllib2.HTTPError, e:
if self.show_response:
data = e.fp.read()
result = e.code, e.msg
except urllib2.URLError, e:
result = 500, str(e)
else:
if self.show_response:
data = result.read()
result = 200, 'OK'
if self.show_response:
print '-'*75, data, '-'*75
return result
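# A sketch of the multipart/form-data framing produced above, for a
# single hypothetical field (illustrative only, not part of register.py;
# StringIO is imported at the top of this module):
if __name__ == "__main__":
    boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
    body = StringIO.StringIO()
    body.write('\n--' + boundary)
    body.write('\nContent-Disposition: form-data; name="name"')
    body.write("\n\n")
    body.write("example")
    body.write('\n--' + boundary + '--')
    body.write("\n")
    print body.getvalue()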

View File

@ -1,469 +0,0 @@
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: sdist.py 61268 2008-03-06 07:14:26Z martin.v.loewis $"
import sys, os, string
from types import *
from glob import glob
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import *
from distutils.filelist import FileList
from distutils import log
def show_formats ():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats=[]
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help(
"List of available source distribution formats:")
class sdist (Command):
description = "create a source distribution (tarball, zip file, etc.)"
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual"),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = { 'posix': 'gztar',
'java': 'gztar',
'nt': 'zip' }
def initialize_options (self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
def finalize_options (self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create source distributions " + \
"on platform %s" % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, \
"unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = "dist"
def run (self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Ensure that all required meta-data is given; warn if not (but
# don't die, it's not *that* serious!)
self.check_metadata()
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata (self):
"""Ensure that all required elements of meta-data (name, version,
URL, (author and author_email) or (maintainer and
maintainer_email)) are supplied by the Distribution object; warn if
any are missing.
"""
metadata = self.distribution.metadata
missing = []
for attr in ('name', 'version', 'url'):
if not (hasattr(metadata, attr) and getattr(metadata, attr)):
missing.append(attr)
if missing:
self.warn("missing required meta-data: " +
string.join(missing, ", "))
if metadata.author:
if not metadata.author_email:
self.warn("missing meta-data: if 'author' supplied, " +
"'author_email' must be supplied too")
elif metadata.maintainer:
if not metadata.maintainer_email:
self.warn("missing meta-data: if 'maintainer' supplied, " +
"'maintainer_email' must be supplied too")
else:
self.warn("missing meta-data: either (author and author_email) " +
"or (maintainer and maintainer_email) " +
"must be supplied")
# check_metadata ()
def get_file_list (self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options and the state of the filesystem.
"""
# If we have a manifest template, see if it's newer than the
# manifest; if so, we'll regenerate the manifest.
template_exists = os.path.isfile(self.template)
if template_exists:
template_newer = dep_util.newer(self.template, self.manifest)
# The contents of the manifest file almost certainly depend on the
# setup script as well as the manifest template -- so if the setup
# script is newer than the manifest, we'll regenerate the manifest
# from the template. (Well, not quite: if we already have a
# manifest, but there's no template -- which will happen if the
# developer elects to generate a manifest some other way -- then we
# can't regenerate the manifest, so we don't.)
self.debug_print("checking if %s newer than %s" %
(self.distribution.script_name, self.manifest))
setup_newer = dep_util.newer(self.distribution.script_name,
self.manifest)
# cases:
# 1) no manifest, template exists: generate manifest
# (covered by 2a: no manifest == template newer)
# 2) manifest & template exist:
# 2a) template or setup script newer than manifest:
# regenerate manifest
# 2b) manifest newer than both:
# do nothing (unless --force or --manifest-only)
# 3) manifest exists, no template:
# do nothing (unless --force or --manifest-only)
# 4) no manifest, no template: generate w/ warning ("defaults only")
manifest_outofdate = (template_exists and
(template_newer or setup_newer))
force_regen = self.force_manifest or self.manifest_only
manifest_exists = os.path.isfile(self.manifest)
neither_exists = (not template_exists and not manifest_exists)
# Regenerate the manifest if necessary (or if explicitly told to)
if manifest_outofdate or neither_exists or force_regen:
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
# Don't regenerate the manifest, just read it in.
else:
self.read_manifest()
# get_file_list ()
def add_defaults (self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if type(fn) is TupleType:
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
string.join(alts, ', '))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
# add_defaults ()
def read_template (self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template,
strip_comments=1,
skip_blanks=1,
join_lines=1,
lstrip_ws=1,
rstrip_ws=1,
collapse_join=1)
while 1:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
# read_template ()
def prune_file_list (self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
self.filelist.exclude_pattern(r'(^|/)(RCS|CVS|\.svn|\.hg|\.git|\.bzr|_darcs)/.*', is_regex=1)
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
self.execute(file_util.write_file,
(self.manifest, self.filelist.files),
"writing manifest file '%s'" % self.manifest)
# write_manifest ()
def read_manifest (self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
try:
while 1:
line = manifest.readline()
if line == '': # end of file
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
finally:
manifest.close()
# read_manifest ()
def make_release_tree (self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
# make_release_tree ()
def make_distribution (self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files (self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
# class sdist
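# A sketch of feeding manifest-template lines to a FileList the way
# read_template() above does (the directives are standard MANIFEST.in
# syntax; the patterns are hypothetical; FileList is imported at the top
# of this module):
if __name__ == "__main__":
    fl = FileList()
    fl.findall()                         # scan the current directory
    for line in ["include *.py", "prune build"]:
        fl.process_template_line(line)
    print fl.files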

View File

@ -1,199 +0,0 @@
"""distutils.command.upload
Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
from distutils.errors import *
from distutils.core import Command
from distutils.spawn import spawn
from distutils import log
from hashlib import md5
import os
import socket
import platform
import ConfigParser
import httplib
import base64
import urlparse
import cStringIO as StringIO
class upload(Command):
description = "upload binary package to PyPI"
DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('sign', 's',
'sign files to upload using gpg'),
('identity=', 'i', 'GPG identity used to sign files'),
]
boolean_options = ['show-response', 'sign']
def initialize_options(self):
self.username = ''
self.password = ''
self.repository = ''
self.show_response = 0
self.sign = False
self.identity = None
def finalize_options(self):
if self.identity and not self.sign:
raise DistutilsOptionError(
"Must use --sign for --identity to have meaning"
)
if os.environ.has_key('HOME'):
rc = os.path.join(os.environ['HOME'], '.pypirc')
if os.path.exists(rc):
self.announce('Using PyPI login from %s' % rc)
config = ConfigParser.ConfigParser({
'username':'',
'password':'',
'repository':''})
config.read(rc)
if not self.repository:
self.repository = config.get('server-login', 'repository')
if not self.username:
self.username = config.get('server-login', 'username')
if not self.password:
self.password = config.get('server-login', 'password')
if not self.repository:
self.repository = self.DEFAULT_REPOSITORY
def run(self):
if not self.distribution.dist_files:
raise DistutilsOptionError("No dist file created in earlier command")
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
def upload_file(self, command, pyversion, filename):
# Sign if requested
if self.sign:
gpg_args = ["gpg", "--detach-sign", "-a", filename]
if self.identity:
gpg_args[2:2] = ["--local-user", self.identity]
spawn(gpg_args,
dry_run=self.dry_run)
# Fill in the data - send all the meta-data in case we need to
# register a new release
content = open(filename,'rb').read()
meta = self.distribution.metadata
data = {
# action
':action': 'file_upload',
'protcol_version': '1',   # (sic) -- misspelled key preserved; it is what the upload protocol uses
# identify release
'name': meta.get_name(),
'version': meta.get_version(),
# file content
'content': (os.path.basename(filename),content),
'filetype': command,
'pyversion': pyversion,
'md5_digest': md5(content).hexdigest(),
# additional meta-data
'metadata_version' : '1.0',
'summary': meta.get_description(),
'home_page': meta.get_url(),
'author': meta.get_contact(),
'author_email': meta.get_contact_email(),
'license': meta.get_licence(),
'description': meta.get_long_description(),
'keywords': meta.get_keywords(),
'platform': meta.get_platforms(),
'classifiers': meta.get_classifiers(),
'download_url': meta.get_download_url(),
# PEP 314
'provides': meta.get_provides(),
'requires': meta.get_requires(),
'obsoletes': meta.get_obsoletes(),
}
comment = ''
if command == 'bdist_rpm':
dist, version, id = platform.dist()
if dist:
comment = 'built for %s %s' % (dist, version)
elif command == 'bdist_dumb':
comment = 'built for %s' % platform.platform(terse=1)
data['comment'] = comment
if self.sign:
data['gpg_signature'] = (os.path.basename(filename) + ".asc",
open(filename+".asc").read())
# set up the authentication
auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'
body = StringIO.StringIO()
for key, value in data.items():
# handle multiple entries for the same name
if type(value) != type([]):
value = [value]
for value in value:
if type(value) is tuple:
fn = ';filename="%s"' % value[0]
value = value[1]
else:
fn = ""
value = str(value)
body.write(sep_boundary)
body.write('\nContent-Disposition: form-data; name="%s"'%key)
body.write(fn)
body.write("\n\n")
body.write(value)
if value and value[-1] == '\r':
body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body.write("\n")
body = body.getvalue()
self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urlparse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
http = httplib.HTTPConnection(netloc)
elif schema == 'https':
http = httplib.HTTPSConnection(netloc)
else:
raise AssertionError, "unsupported schema "+schema
data = ''
loglevel = log.INFO
try:
http.connect()
http.putrequest("POST", url)
http.putheader('Content-type',
'multipart/form-data; boundary=%s'%boundary)
http.putheader('Content-length', str(len(body)))
http.putheader('Authorization', auth)
http.endheaders()
http.send(body)
except socket.error, e:
self.announce(str(e), log.ERROR)
return
r = http.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print '-'*75, r.read(), '-'*75
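# A sketch of the Basic auth header assembled in upload_file() above,
# with placeholder credentials (base64 is imported at the top of this
# module):
if __name__ == "__main__":
    auth = "Basic " + base64.encodestring("user" + ":" + "secret").strip()
    print auth                           # -> Basic dXNlcjpzZWNyZXQ=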

Some files were not shown because too many files have changed in this diff.