How can a README.md file be included in a PyPI module package using setup.py? - setup.py

I want to include a README.md file with my module package for PyPI such that it can be read by a function in my setup.py. However, it is not obvious to me how to get setup.py and related infrastructure to actually include the README.md file.
I have included a MANIFEST.in file in my package that itself lists README.md, and I have set the setuptools.setup argument include_package_data to True, but this has not worked.
MANIFEST.in:
junkmodule.py
junkmodule_script.py
LICENSE
MANIFEST.in
README.md
setup.py
setup.py:
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os

import pypandoc
import setuptools

def main():
    setuptools.setup(
        name = "junkmodule",
        version = "2017.01.13.1416",
        description = "junk testing module",
        long_description = pypandoc.convert("README.md", "rst"),
        url = "https://github.com/user/junkmodule",
        author = "LRH",
        author_email = "lhr#psern.ch",
        license = "GPLv3",
        include_package_data = True,
        py_modules = [
            "junkmodule"
        ],
        install_requires = [
            "numpy"
        ],
        scripts = [
            "junkmodule_script.py"
        ],
        entry_points = """
            [console_scripts]
            junkmodule = junkmodule:junkmodule
        """
    )

if __name__ == "__main__":
    main()
The commands I use to register and upload the module to PyPI are as follows:
python setup.py register -r https://pypi.python.org/pypi
python setup.py sdist upload -r https://pypi.python.org/pypi

I'm using this in my modules; give it a try:
try:
    import pypandoc
    description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    # Fall back to the raw Markdown if pypandoc or the file is unavailable.
    description = open('README.md').read()
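For completeness, here is roughly how that fallback plugs into the setup() call from the question; a minimal sketch, reusing the names from above and changing only how long_description is produced:

import setuptools

try:
    import pypandoc
    # Convert the Markdown README to reStructuredText for the PyPI description page.
    long_description = pypandoc.convert("README.md", "rst")
except (IOError, ImportError):
    # Fall back to the raw Markdown if pypandoc (or pandoc itself) is unavailable.
    long_description = open("README.md").read()

setuptools.setup(
    name = "junkmodule",
    version = "2017.01.13.1416",
    description = "junk testing module",
    long_description = long_description,
    py_modules = ["junkmodule"],
)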

Related

cx_freeze and PyQt5 build error when building the project

I'm having issues getting cx_freeze to build my program, so I started afresh and discovered the issue was with PyQt5. I stripped down the code to this:
import os
import sys
from PyQt5 import QtWidgets
print("program ran!")
The setup.py looks like this:
from setuptools import find_packages
from cx_Freeze import setup, Executable

options = {
    'build_exe': {
        'includes': [
        ],
        'packages': [
            'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtWidgets'
        ],
        'excludes': [
        ]
    }
}

executables = [
    Executable('test2.py',
               base=None,
               targetName='program.exe')
]

setup(
    name='program',
    packages=find_packages(),
    version='0.1',
    description='PCPRo 2022',
    executables=executables,
    options=options
)
I build the program using the command python setup.py build
When I run the resulting .exe I get the error:
Fatal Python error: _PyInterpreterState_Get(): no current thread state
Python runtime state: unknown
If I remove the line from PyQt5 import QtWidgets and rebuild, the program runs as expected.
I am using Python 3.7.4 [MSC v.1916 64 bit (AMD64)] on win32

Conan Error: install loop detected in context host .. requires .. which is an ancestor to

I know that this might seem obvious to some of you, but I've been digging everywhere for answers with no success. I'm trying to conan install my own package from my repo, but I can't get past this error. It tells me that I have a loop in my requires, but my package has no requirements.
I use this recipe for the upload with conan export-pkg:
# Standard library imports
import configparser
import os
import sys

# Related third party imports
import conans

class TlConanFile(conans.ConanFile):
    settings = "os", "arch"

    def __init__(self, output, runner, display_name="", user=None, channel=None):  # pylint: disable=too-many-arguments
        super().__init__(output, runner, display_name, user, channel)
        if "--build-folder" in sys.argv:
            # Conan checks the arguments and fails if the value is missing, the next argument is always the value
            build_folder = sys.argv[sys.argv.index("--build-folder") + 1]
            self.__class__.exports = os.path.relpath(os.path.join(build_folder, "..", "conanfile.txt"),
                                                     os.path.dirname(__file__))
        elif "-bf" in sys.argv:
            # Conan checks the arguments and fails if the value is missing, the next argument is always the value
            build_folder = sys.argv[sys.argv.index("-bf") + 1]
            self.__class__.exports = os.path.relpath(os.path.join(build_folder, "..", "conanfile.txt"),
                                                     os.path.dirname(__file__))
        elif "-pf" in sys.argv:
            # Conan checks the arguments and fails if the value is missing, the next argument is always the value
            build_folder = sys.argv[sys.argv.index("-pf") + 1]
            self.__class__.exports = os.path.relpath(os.path.join(build_folder, "..", "conanfile.txt"),
                                                     os.path.dirname(__file__))
        elif "--package-folder" in sys.argv:
            # Conan checks the arguments and fails if the value is missing, the next argument is always the value
            build_folder = sys.argv[sys.argv.index("--package-folder") + 1]
            self.__class__.exports = os.path.relpath(os.path.join(build_folder, "..", "conanfile.txt"),
                                                     os.path.dirname(__file__))
        else:
            # Simply assume that we are running the command in the build directory
            build_folder = os.getcwd()
            self.__class__.exports = os.path.relpath(os.path.join(build_folder, "..", "conanfile.txt"),
                                                     os.path.dirname(__file__))

    def package(self):
        self.copy("*.h", dst="include/aveer", src="output/include/aveer")
        self.copy("*.i", dst="include/aveer/swig", src="output/include/aveer/swig")
        self.copy("*.so*", dst="lib", src="output/lib", symlinks=True)
        self.copy("*.cmake", dst="lib/cmake/aveer", src="output/lib/cmake/aveer")
        self.copy("*.so", dst="lib/python3/dist-packages/aveer", src="output/lib/python3/dist-packages/aveer")
        self.copy("*", dst="lib/python3/dist-packages/aveer", src="output/lib/python3/dist-packages/aveer")
        self.copy("*.yml", dst="share/gnuradio/grc/blocks", src="output/share/gnuradio/grc/blocks")
        #self.copy("*", dst="share/gnuradio/grc/blocks", src="share/gnuradio/grc/blocks") doc?

    def package_info(self):
        self.cpp_info.libs = conans.tools.collect_libs(self)

    def requirements(self):
        with open("../conanfile.txt") as conanfile_txt:
            config = configparser.ConfigParser(allow_no_value=True, delimiters=["\0"])
            config.optionxform = str
            config.read_file(conanfile_txt)
            for requirement in config['requires']:
                self.requires(requirement)
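As an aside, the requirements() trick above works because configparser is given a delimiter ("\0") that can never appear in a line, so every line of a section is read as a bare key. A small standalone sketch of that parsing, with a made-up package reference:

import configparser

# Hypothetical conanfile.txt content; the reference string is only an example.
CONANFILE_TXT = """\
[requires]
some-lib/1.0.45@some_repo/Release
[generators]
cmake
[options]
"""

config = configparser.ConfigParser(allow_no_value=True, delimiters=["\0"])
config.optionxform = str  # keep the original case of the reference strings
config.read_string(CONANFILE_TXT)

# Each line of [requires] becomes a key with value None.
for requirement in config["requires"]:
    print(requirement)  # -> some-lib/1.0.45@some_repo/Release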
To go with this conanfile.py, I also have this conanfile.txt:
[requires]
[generators]
cmake
[options]
That's it for the upload.
For the install, I use this conanfile.txt:
[requires]
lib-grplugin/1.0.45#aveer_repo/Release
[generators]
cmake
[options]
[imports]
include/aveer, *.h -> ./output/include/aveer
include/aveer/swig, *.i -> ./output/include/aveer/swig
lib, *.so* -> ./output/lib
lib/cmake/aveer, *.cmake -> ./output/lib/cmake/aveer
lib/python3/dist-packages/aveer, *.so -> ./output/lib/python3/dist-packages/aveer
lib/python3/dist-packages/aveer, *.py -> ./output/lib/python3/dist-packages/aveer
share/gnuradio/grc/blocks, *.yml -> ./output/share/gnuradio/grc/blocks
When I try to run my conan install .., it gives me the following in the prompt:
[Screenshot of the command prompt with the error]
I also tried installing another package to test my profile/config, and that worked as intended.
As you can see I'm new here and even newer to Conan, so if you need more info that I haven't mentioned here, please let me know.

How to create a custom module in Ansible

This is the custom module I have written to get the datetime from the current system. I have put the module in the /usr/share/my_modules folder.
#!/usr/bin/python

import datetime
import json

date = str(datetime.datetime.now())
print(json.dumps({
    "time": date
}))

def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent']),
            name = dict(required=True),
            enabled = dict(required=True, type='bool'),
            something = dict(aliases=['whatever'])
        )
    )
    module.exit_json(changed=True, something_else=12345)
    module.fail_json(msg="Something fatal happened")

from ansible.module_utils.basic import *
from ansible.module_utils.basic import AnsibleModule

if __name__ == '__main__':
    main()
And now when I try to execute it using the command ansible local -m timetest, I am getting this error:
127.0.0.1 | FAILED! => {
    "failed": true,
    "msg": "The module timetest was not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem."
}
Why is it not executing my custom module? Please help me resolve this issue.
You can create a library directory inside the directory where your playbook exists; your file structure will look like this:
.
|-- playbook.yml
|-- library
`-- your-custom-module.py
Hope that might help you
Have you tried following Ansible test module instructions at http://ansible-docs.readthedocs.io/zh/stable-2.0/rst/developing_modules.html#testing-modules?
git clone git://github.com/ansible/ansible.git --recursive
source ansible/hacking/env-setup
chmod +x ansible/hacking/test-module
ansible/hacking/test-module -m ./timetest.py
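Once the module is on a path Ansible actually searches (the playbook-level library directory above, or a directory added via the ANSIBLE_LIBRARY environment variable), the module body itself can be reduced to the standard AnsibleModule pattern. A minimal sketch, assuming all timetest.py needs to do is report the current time:

#!/usr/bin/python
import datetime

from ansible.module_utils.basic import AnsibleModule

def main():
    # No arguments are needed just to report the time.
    module = AnsibleModule(argument_spec=dict())
    # Return the timestamp in the module result instead of printing JSON at import time.
    module.exit_json(changed=False, time=str(datetime.datetime.now()))

if __name__ == '__main__':
    main()

Run with ansible local -m timetest, the result should then include the time field.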

Upload entire Bitbucket repo to S3 using Bitbucket Pipeline

I'm using Bitbucket Pipelines. I want it to push the entire contents of my repo (very small) to S3. I don't want to have to zip it up, push to S3, and then unzip things. I just want it to take the existing file/folder structure in my Bitbucket repo and push that to S3.
What should the yaml file and .py file look like to accomplish this?
Here is the current yaml file:
image: python:3.5.1

pipelines:
  branches:
    master:
      - step:
          script:
            # - apt-get update # required to install zip
            # - apt-get install -y zip # required if you want to zip repository objects
            - pip install boto3==1.3.0 # required for s3_upload.py
            # the first argument is the name of the existing S3 bucket to upload the artefact to
            # the second argument is the artefact to be uploaded
            # the third argument is the bucket key
            # html files
            - python s3_upload.py my-bucket-name html/index_template.html html/index_template.html # run the deployment script
            # Example command line parameters. Replace with your values
            # - python s3_upload.py bb-s3-upload SampleApp_Linux.zip SampleApp_Linux # run the deployment script
And here is my current Python:
from __future__ import print_function
import os
import sys
import argparse
import boto3
from botocore.exceptions import ClientError

def upload_to_s3(bucket, artefact, bucket_key):
    """
    Uploads an artefact to Amazon S3
    """
    try:
        client = boto3.client('s3')
    except ClientError as err:
        print("Failed to create boto3 client.\n" + str(err))
        return False
    try:
        client.put_object(
            Body=open(artefact, 'rb'),
            Bucket=bucket,
            Key=bucket_key
        )
    except ClientError as err:
        print("Failed to upload artefact to S3.\n" + str(err))
        return False
    except IOError as err:
        print("Failed to access artefact in this directory.\n" + str(err))
        return False
    return True

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("bucket", help="Name of the existing S3 bucket")
    parser.add_argument("artefact", help="Name of the artefact to be uploaded to S3")
    parser.add_argument("bucket_key", help="Name of the S3 Bucket key")
    args = parser.parse_args()

    if not upload_to_s3(args.bucket, args.artefact, args.bucket_key):
        sys.exit(1)

if __name__ == "__main__":
    main()
This requires me to list every single file in the repo in the yaml file as another command. I just want it to grab everything and upload it to S3.
You can change to use the Docker image https://hub.docker.com/r/abesiyo/s3/.
It runs quite well.
bitbucket-pipelines.yml
image: abesiyo/s3
pipelines:
default:
- step:
script:
- s3 --region "us-east-1" rm s3://<bucket name>
- s3 --region "us-east-1" sync . s3://<bucket name>
Please also set up these environment variables in Bitbucket Pipelines:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
Figured it out myself. Here is the Python file, 's3_upload.py':
from __future__ import print_function
import os
import sys
import argparse
import boto3
#import zipfile
from botocore.exceptions import ClientError

def upload_to_s3(bucket, artefact, is_folder, bucket_key):
    try:
        client = boto3.client('s3')
    except ClientError as err:
        print("Failed to create boto3 client.\n" + str(err))
        return False
    if is_folder == 'true':
        for root, dirs, files in os.walk(artefact, topdown=False):
            print('Walking it')
            for file in files:
                #add a check like this if you just want certain file types uploaded
                #if file.endswith('.js'):
                try:
                    print(file)
                    client.upload_file(os.path.join(root, file), bucket, os.path.join(root, file))
                except ClientError as err:
                    print("Failed to upload artefact to S3.\n" + str(err))
                    return False
                except IOError as err:
                    print("Failed to access artefact in this directory.\n" + str(err))
                    return False
                #else:
                #    print('Skipping file:' + file)
    else:
        print('Uploading file ' + artefact)
        client.upload_file(artefact, bucket, bucket_key)
    return True

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("bucket", help="Name of the existing S3 bucket")
    parser.add_argument("artefact", help="Name of the artefact to be uploaded to S3")
    parser.add_argument("is_folder", help="True if its the name of a folder")
    parser.add_argument("bucket_key", help="Name of file in bucket")
    args = parser.parse_args()

    if not upload_to_s3(args.bucket, args.artefact, args.is_folder, args.bucket_key):
        sys.exit(1)

if __name__ == "__main__":
    main()
And here is the bitbucket-pipelines.yml file:
---
image: python:3.5.1

pipelines:
  branches:
    dev:
      - step:
          script:
            - pip install boto3==1.4.1 # required for s3_upload.py
            - pip install requests
            # the first argument is the name of the existing S3 bucket to upload the artefact to
            # the second argument is the artefact to be uploaded
            # the third argument is if the artefact is a folder
            # the fourth argument is the bucket_key to use
            - python s3_emptyBucket.py dev-slz-processor-repo
            - python s3_upload.py dev-slz-processor-repo lambda true lambda
            - python s3_upload.py dev-slz-processor-repo node_modules true node_modules
            - python s3_upload.py dev-slz-processor-repo config.dev.json false config.json
    stage:
      - step:
          script:
            - pip install boto3==1.3.0 # required for s3_upload.py
            - python s3_emptyBucket.py staging-slz-processor-repo
            - python s3_upload.py staging-slz-processor-repo lambda true lambda
            - python s3_upload.py staging-slz-processor-repo node_modules true node_modules
            - python s3_upload.py staging-slz-processor-repo config.staging.json false config.json
    master:
      - step:
          script:
            - pip install boto3==1.3.0 # required for s3_upload.py
            - python s3_emptyBucket.py prod-slz-processor-repo
            - python s3_upload.py prod-slz-processor-repo lambda true lambda
            - python s3_upload.py prod-slz-processor-repo node_modules true node_modules
            - python s3_upload.py prod-slz-processor-repo config.prod.json false config.json
As an example, for the dev branch it grabs everything in the "lambda" folder, walks the entire structure of that folder, and uploads each item it finds to the dev-slz-processor-repo bucket.
Lastly, here is a little helpful function, 's3_emptyBucket', to remove all objects from the bucket before uploading the new ones:
from __future__ import print_function
import os
import sys
import argparse
import boto3
#import zipfile
from botocore.exceptions import ClientError

def empty_bucket(bucket):
    try:
        resource = boto3.resource('s3')
    except ClientError as err:
        print("Failed to create boto3 resource.\n" + str(err))
        return False
    print("Removing all objects from bucket: " + bucket)
    resource.Bucket(bucket).objects.delete()
    return True

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("bucket", help="Name of the existing S3 bucket to empty")
    args = parser.parse_args()

    if not empty_bucket(args.bucket):
        sys.exit(1)

if __name__ == "__main__":
    main()
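If you only ever want to clear one prefix (for example lambda/) instead of the whole bucket before re-uploading, a hedged variant of empty_bucket could filter on the key prefix; boto3's object collections support this directly:

import boto3

def empty_prefix(bucket, prefix):
    """Delete only the objects whose keys start with the given prefix."""
    resource = boto3.resource('s3')
    # filter() narrows the collection to matching keys before the batched delete.
    resource.Bucket(bucket).objects.filter(Prefix=prefix).delete()

if __name__ == "__main__":
    # Example values taken from the pipeline above.
    empty_prefix("dev-slz-processor-repo", "lambda/")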
Atlassian now offers "Pipes" to simplify configuration of some common tasks. There's one for S3 upload as well.
No need to specify a different image type:
image: node:8

pipelines:
  branches:
    master:
      - step:
          script:
            - pipe: atlassian/aws-s3-deploy:0.2.1
              variables:
                AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID
                AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY
                AWS_DEFAULT_REGION: "us-east-1"
                S3_BUCKET: "your.bucket.name"
                LOCAL_PATH: "dist"
For deploying a static website to Amazon S3 I have this bitbucket-pipelines.yml configuration file:
image: attensee/s3_website

pipelines:
  default:
    - step:
        script:
          - s3_website push
I’m using the attensee/s3_website docker image because that one has the awesome s3_website tool installed.
The configuration file of s3_website (s3_website.yml) [create this file in the root directory of the repository in Bitbucket] looks something like this:
s3_id: <%= ENV['S3_ID'] %>
s3_secret: <%= ENV['S3_SECRET'] %>
s3_bucket: bitbucket-pipelines
site: .
We have to define the environment variables S3_ID and S3_SECRET in the Bitbucket settings.
Thanks to https://www.savjee.be/2016/06/Deploying-website-to-ftp-or-amazon-s3-with-BitBucket-Pipelines/ for the solution.

cx_freeze ignores custom variables module

I'm trying to freeze a small GUI I made using PyQt5, Matplotlib and Python 3.3.
I'm a beginner so please pardon my ignorance.
The program has:
- one QMainWindow
- two auxiliary QDialogs with Matplotlib plots
- one Variables_Module that I use to store variables (rather than using global)
- one custom Class called ROI
When run from the Python terminal, the code works fine.
Issue: When running the exe obtained with cx_freeze, the QMainWindow is loaded, the user chooses data, and the data is immediately shown in the first QDialog. The second QDialog uses variables stored in Variables_Module and should show a matplotlib hist2d, but nothing appears.
For this reason I think Variables_Module somehow does not get included in the build.
Main.py has these imports:
import sys, time
from PyQt5.QtCore import Qt, pyqtSignal, QObject
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QFormLayout, QVBoxLayout,QFileDialog, QDialog, QPushButton, QGridLayout, QMessageBox, QSlider, QSpinBox, QGroupBox, QLabel, QCheckBox, QComboBox, QColorDialog, QProgressDialog
from HySP_main_GUI import Ui_Main
import numpy
from matplotlib.pylab import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
#these are the "custom" modules
import Variables_Module
from ROI import Resizable_animated_rect
from scipy import ndimage
I tried including the "custom" modules in setup.py in every combination, as includes, includefiles, or packages, with no solution.
I do understand I'm not supposed to put these modules everywhere but I'm out of ideas.
Also, all files (Main.py, ROI.py, Variables_Module.py) are in the same folder and no errors are given when running cx_freeze build.
The setup.py I'm using is:
import sys
from cx_Freeze import setup, Executable

includes = ["ROI", "Variables_Module"]
includefiles = ['ROI.py', 'Variables_Module.py']
excludes = ['Tkinter']
packages = ["ROI", "Variables_Module"]
path = []

# Dependencies are automatically detected, but it might need fine tuning.
build_exe_options = {
    "includes": includes,
    "include_files": includefiles,
    "excludes": excludes,
    "packages": packages,
    "path": path
}

# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
exe = None
if sys.platform == "win32":
    exe = Executable(
        script="E:\\Python\\Projects\\test\\test.py",
        initScript=None,
        base="Win32GUI",
        compress=True,
        copyDependentFiles=True,
        appendScriptToExe=False,
        appendScriptToLibrary=False,
        icon=None
    )

setup(
    name="test",
    version="0.1",
    author='test',
    description="My GUI!",
    options={"build_exe": build_exe_options},
    executables=[exe]
)
It would appear that warnings halt the frozen exe.
Before plotting my new image, some warnings regarding NaN were shown, and apparently these were preventing the app from going on with the calculations.
I solved it by suppressing the warnings in the troublesome function using:
import warnings
warnings.simplefilter("ignore")
This solves the problem. Took 2 days. -_-'
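A slightly narrower alternative is to scope the suppression with a context manager so warnings stay enabled everywhere else; a sketch, with a made-up name for the troublesome function:

import warnings

def plot_second_dialog(data):  # hypothetical name for the troublesome function
    with warnings.catch_warnings():
        # Ignore warnings (e.g. the NaN-related ones) only while this block runs.
        warnings.simplefilter("ignore")
        # ... the hist2d plotting / calculation code goes here ...
        pass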