I have developed numerous iOS apps over the years, so I know Objective-C reasonably well.
I'd like to build my first web service to offload some of the most processor-intensive functions.
I'm leaning towards using my Mac as the server, which comes with Apache. I have configured it and it appears to be working as it should (I can type the Mac's IP address into a browser and receive a confirmation page).
Now I'm trying to decide how to build the server-side web service, which is totally new to me. I'd like to leverage my Objective-C knowledge if possible. I think I'm looking for an Objective-C-compatible web service engine and some examples of how to connect it to browsers and mobile interfaces. I was leaning towards using Amazon's SimpleDB as the database.
BTW: I see Apple has Lion Server, but I cannot work out whether it is an option here.
Any thoughts/recommendations are appreciated.
There are examples of simple web servers written in Objective-C out there, such as this and this.
That said, there are probably "better" ways of doing this if you don't mind using other technologies. This is a matter of preference, but I've used Python, MySQL, and the excellent web.py framework for these sorts of backends.
For example, here's a web service (some redundancies omitted...) using the combination of technologies described. I just run this on my server, and it takes care of URL routing and serves JSON from the database.
import web
import json
import MySQLdb

urls = (
    "/equip/gruppo", "gruppo",  # GET = get all gruppos, POST = save gruppo
    "/equip/frame", "frame"
)

class StatusCode:
    (Success, SuccessNoRows, FailConnect, FailQuery, FailMissingParam, FailOther) = range(6)

# top-level class that handles db interaction
class APIObject:
    def __init__(self):
        self.object_dict = {}  # top-level dictionary to be turned into JSON
        self.rows = []
        self.cursor = None
        self.conn = None

    def dbConnect(self):
        try:
            self.conn = MySQLdb.connect(host='localhost', user='my_api_user', passwd='api_user_pw', db='my_db')
            self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
        except:
            self.object_dict['api_status'] = StatusCode.FailConnect
            return False
        else:
            return True

    def queryExecute(self, query):
        try:
            self.cursor.execute(query)
            self.rows = self.cursor.fetchall()
        except:
            self.object_dict['api_status'] = StatusCode.FailQuery
            return False
        else:
            return True

class gruppo(APIObject):
    def GET(self):
        web.header('Content-Type', 'application/json')
        if self.dbConnect() == False:
            return json.dumps(self.object_dict, sort_keys=True, indent=4)
        else:
            if self.queryExecute("SELECT * FROM gruppos") == False:
                return json.dumps(self.object_dict, sort_keys=True, indent=4)
            else:
                self.object_dict['api_status'] = StatusCode.SuccessNoRows if len(self.rows) == 0 else StatusCode.Success
                data_list = []
                for row in self.rows:
                    # create a dictionary with the required elements
                    d = {}
                    d['id'] = row['id']
                    d['maker'] = row['maker_name']
                    d['type'] = row['type_name']
                    # append to the object list
                    data_list.append(d)
                self.object_dict['data'] = data_list
                # return to the client
                return json.dumps(self.object_dict, sort_keys=True, indent=4)
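Since some of the omitted boilerplate includes starting the service, here is a minimal sketch of how a web.py application like this would typically be booted (assuming the urls tuple and handler classes above, and that a frame class also exists):

import web

# Minimal web.py boot-strap; maps the urls tuple to the handler classes
# defined in this module and starts the built-in server.
if __name__ == "__main__":
    app = web.application(urls, globals())
    app.run()  # serves on port 8080 by default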
I am trying to extract the content of the [Documentation] section as a string, for comparison with other data in a Python script.
I was told to use the Robot Framework API (https://robot-framework.readthedocs.io/en/stable/)
to do the extraction, but I have no idea how.
However, I am required to work with version 3.1.2.
Example:
*** Test Cases ***
ATC Verify that Sensor Battery can enable and disable manufacturing mode
[Documentation] E1: This is the description of the test 1
... E2: This is the description of the test 2
[Tags] E1 TRACE{Trace_of_E1}
... E2 TRACE{Trace_of_E2}
I want to extract the string as:
E1: This is the description of the test 1
E2: This is the description of the test 2
Have a look at these examples. I did something similar to generate test plan descriptions. I tried to adapt my code to your requirements, and this could maybe work for you.
import os
import re

from robot.api.parsing import (
    get_model, get_tokens, Documentation, EmptyLine, KeywordCall,
    ModelVisitor, Token
)

class RobotParser(ModelVisitor):
    def __init__(self):
        # Attribute used to collect the formatted documentation text
        self.text = ''

    def get_text(self):
        return self.text

    def visit_TestCase(self, node):
        # The matched `TestCase` node is a block with `header` and
        # `body` attributes. `header` is a statement with familiar
        # `get_token` and `get_value` methods for getting certain
        # tokens or their value.
        for keyword in node.body:
            # skip statements that are not [Documentation] settings
            if keyword.get_value(Token.DOCUMENTATION) is None:
                continue
            self.text += keyword.get_value(Token.ARGUMENT)

    def visit_Documentation(self, node):
        # The matched "Documentation" node with its value
        self.text += node.value + '\n'

    def visit_File(self, node):
        # Call `generic_visit` to visit also child nodes.
        return self.generic_visit(node)

if __name__ == "__main__":
    path = "../tests"
    for filename in os.listdir(path):
        if re.match(r".*\.robot", filename):
            model = get_model(os.path.join(path, filename))
            robot_parser = RobotParser()
            robot_parser.visit(model)
            text = robot_parser.get_text()
The code marked as the best answer didn't quite work for me and has a lot of redundancy, but it inspired me enough to get into the parsing and write it in a more readable and efficient way that actually works as-is. You just have to have your own way of iterating through the filesystem and calling the get_robot_metadata(filepath) function.
from robot.api.parsing import (get_model, ModelVisitor, Token)

class RobotParser(ModelVisitor):
    def __init__(self):
        self.testcases = {}

    def visit_TestCase(self, node):
        testcasename = node.header.name
        self.testcases[testcasename] = {}
        for section in node.body:
            if section.get_value(Token.DOCUMENTATION) is not None:
                documentation = section.value
                self.testcases[testcasename]['Documentation'] = documentation
            elif section.get_value(Token.TAGS) is not None:
                tags = section.values
                self.testcases[testcasename]['Tags'] = tags

    def get_testcases(self):
        return self.testcases

def get_robot_metadata(filepath):
    if filepath.endswith('.robot'):
        robot_parser = RobotParser()
        model = get_model(filepath)
        robot_parser.visit(model)
        metadata = robot_parser.get_testcases()
        return metadata
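For example, a minimal driver that walks a directory and collects the metadata per file might look like this (a sketch only; the ../tests path is just an assumption carried over from the earlier answer):

import os

# Hypothetical driver: walk a test directory and collect metadata per file.
suite_metadata = {}
tests_dir = "../tests"  # assumed location of the .robot files
for filename in os.listdir(tests_dir):
    filepath = os.path.join(tests_dir, filename)
    metadata = get_robot_metadata(filepath)  # returns None for non-.robot files
    if metadata:
        suite_metadata[filename] = metadata

for name, testcases in suite_metadata.items():
    print(name, testcases)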
This function will be able to extract the [Documentation] section from the testcase:
def documentation_extractor(testcase):
    documentation = []
    for setting in testcase.settings:
        if len(setting) > 2 and setting[1].lower() == "[documentation]":
            for doc in setting[2:]:
                if doc.startswith("#"):
                    # the start of a comment, so skip the rest of the line
                    break
                documentation.append(doc)
            break
    return "\n".join(documentation)
I'm having some issues with persisting documents with pymongo when using insert_many.
I'm handing a list of dicts to insert_many, and it works fine from inside the script that does the inserting; less so once the script has finished.
import numpy as np
from pymongo import MongoClient

def row_to_doc(row):
    rowdict = row.to_dict()
    for key in rowdict:
        val = rowdict[key]
        if type(val) == float or type(val) == np.float64:
            if np.isnan(val):
                # If we want a SQL style document collection
                rowdict[key] = None
                # If we want a NoSQL style document collection
                # del rowdict[key]
    return rowdict

def dataframe_to_collection(df):
    n = len(df)
    doc_list = []
    for k in range(n):
        doc_list.append(row_to_doc(df.iloc[k]))
    return doc_list

def get_mongodb_client(host="localhost", port=27017):
    return MongoClient(host, port)

def create_collection(client):
    db = client["material"]
    return db["master-data"]

def add_docs_to_mongo(collection, doc_list):
    collection.insert_many(doc_list)

def main():
    client = get_mongodb_client()
    csv_fname = "some_csv_fname.csv"
    df = get_clean_csv(csv_fname)
    doc_list = dataframe_to_collection(df)
    collection = create_collection(client)
    add_docs_to_mongo(collection, doc_list)
    test_doc = collection.find_one({"MATERIAL": "000000000000000001"})
When I open up another Python REPL and start looking through the client.material.master_data collection with collection.find_one({"MATERIAL": "000000000000000001"}) or collection.count_documents({}), I get None from find_one and 0 from count_documents.
Is there a step where I need to call some method to persist the data to disk? db.collection.save() in the mongo client API sounds like what I need, but from what I have read it is just another way of inserting documents. Any help would be greatly appreciated.
The problem was that I was getting my collection via attribute access (client.db_name.collection_name), which does not refer to the same collection my code created, because the collection name contains a hyphen. Using bracket access, client.db_name["collection-name"], solved my issue.
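In other words (a small sketch based on the code above): attribute access can only express names that are valid Python identifiers, so client.material.master_data points at a collection literally named master_data, not the master-data collection that create_collection wrote to.

from pymongo import MongoClient

client = MongoClient("localhost", 27017)

# Attribute access maps to a collection literally named "master_data"
# (underscore), which is empty in this setup:
print(client.material.master_data.count_documents({}))      # 0

# Bracket access can name collections that aren't valid identifiers,
# and matches the collection the script actually wrote to:
print(client.material["master-data"].count_documents({}))   # number of inserted docs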
Using the Flask framework in Python, my application needs to:
register and log in users (with either a sqlite or postgres database)
access a specific google spreadsheet that the logged in user owns and output that data in a json format.
I am required to have my own authorization & authentication system
I am having a lot of trouble figuring out how to even structure the application - what directories and sub-directories should I have?
I have done A LOT of playing around (about a month's worth). I am using a virtual environment but don't know how to test my code well either. In general, my code runs, but I have no real idea how the pieces work together. **I am completely new to Flask.**
Structuring the app:
|app
|----run.py
|----config.py
|----database
|---------database.db
|----app
|---------views.py
|---------models.py
|---------forms.py
|---------extensions.py
|----templates
|---------....
|----static
|--------....
Authorization / Authentication:
I have looked at Flask-Login, Flask-Auth, Flask-Security. I understand the general idea but do not know how to securely implement a complete authorization & authentication system.
from flask import Flask, render_template, request
from flask_login import LoginManager, login_user, login_required

# config, SignupForm, User and db come from the app's own config/forms/models modules
app = Flask(__name__)
app.config.from_object(config)

login_manager = LoginManager()
login_manager.init_app(app)

def create_app():
    db.init_app(app)
    db.app = app
    db.create_all()
    return app

@app.route('/')
def index():
    # needs to render the homepage template
    pass

@app.route('/signup', methods=['GET', 'POST'])
def register():
    form = SignupForm()
    if request.method == 'GET':
        return render_template('signup.html', form=form)
    elif request.method == 'POST':
        if form.validate_on_submit():
            if User.query.filter_by(email=form.email.data).first():
                return "email exists"
            else:
                newuser = User(form.email.data, form.password.data)
                db.session.add(newuser)
                db.session.commit()
                login_user(newuser)
                return "New User created"
        else:
            return "form didn't validate"
    return "Signup"

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = SignupForm()
    if request.method == 'GET':
        return render_template('login.html', form=form)
    elif request.method == 'POST':
        if form.validate_on_submit():
            user = User.query.filter_by(email=form.email.data).first()
            if user:
                if user.password == form.password.data:
                    login_user(user)
                    return "you are logged in"
                else:
                    return "wrong password"
            else:
                return "user doesnt exist"
        else:
            return "form did not validate"

@login_manager.user_loader
def load_user(email):
    return User.query.filter_by(email=email).first()

@app.route('/protected')
@login_required
def protected():
    return "protected area for logged in users only"

if __name__ == '__main__':
    # app.create_app()
    app.run(port=5000, host='localhost')
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required
import os

# Create app
app = Flask(__name__)
# app.config['DEBUG'] = True
app.config['SECRET_KEY'] = ''
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////'
app.config['SECURITY_PASSWORD_HASH'] = 'sha512_crypt'
app.config['SECURITY_PASSWORD_SALT'] = str(os.urandom(24))

# Create database connection object
db = SQLAlchemy(app)

# Define models
roles_users = db.Table('roles_users',
        db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
        db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))

class Role(db.Model, RoleMixin):
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))

class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))

user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)

# Create a user to test with
@app.before_first_request
def create_user():
    db.create_all()
    user_datastore.create_user(email='', password='')
    db.session.commit()

@app.route('/')
@login_required
def home():
    # password = encrypt_password('mypass')
    # print verify_and_update_password('mypass', password)
    return "hello"

if __name__ == '__main__':
    app.run(debug=True, use_reloader=False)
**I would really appreciate any guidance!**
Project structure:
If you're planning to build a larger Flask application, you should consider decomposing the functionality into Blueprints.
The official Flask documentation has a tutorial on how to structure larger applications:
http://flask.pocoo.org/docs/0.12/patterns/packages/
Also, take a look at the Hitchhiker's guide to organizing your project. It has some very good points: http://python-guide-pt-br.readthedocs.io/en/latest/writing/structure/
If you're designing a REST API, consider using Flask-RESTful (which also works nicely with Blueprints).
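As a rough illustration of the Blueprint approach (a minimal sketch only; the blueprint, module, and template names here are made up, not taken from the question):

# app/auth/views.py -- hypothetical blueprint module
from flask import Blueprint, render_template

auth = Blueprint('auth', __name__, template_folder='templates')

@auth.route('/login')
def login():
    return render_template('login.html')

# app/__init__.py -- application factory that wires the blueprint in
from flask import Flask

def create_app(config_object='config'):
    app = Flask(__name__)
    app.config.from_object(config_object)
    from app.auth.views import auth
    app.register_blueprint(auth, url_prefix='/auth')
    return app

The factory keeps app creation in one place, which also makes it easy to build a separately configured app instance in tests.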
Y'all, I figured it out & my app is LOOKING GOOD :) I am using blueprints & an application factory pattern.
APP
|_runserver.py
|_/app
|---__init__.py
|---config.py
|---extensions.py
|---forms.py
|---models.py
|---/login_dashboard #blueprint
|------__init__.py
|------views.py
|------/templates
|---------base.html
|---------...
|------/static
|-----------/css
|-----------...
|-----------/js
|-----------...
|-----------/img
|-----------...
It is well understood that forking a process to run Python for each request, as CGI does, is slower than embedding Python, as WSGI does. I would like to implement an XML-RPC interface using the SimpleXMLRPCServer module included in the standard Python library, and I already have an implementation that works via CGI. I believe there should be a faster way. I'd like to try WSGI, but first I need a WSGI request handler, and there does not appear to be one in SimpleXMLRPCServer. Am I all wet, or is there really no equivalent in the standard library under Python 2.6, 2.7, or 3.x?
Here is my initial implementation of a WSGI replacement for CGIXMLRPCRequestHandler:
import BaseHTTPServer
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher

class WSGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through WSGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def __call__(self, environ, start_response):
        """Parse and handle a single XML-RPC request."""
        result = []
        method = environ['REQUEST_METHOD']
        headers = [('Content-type', 'text/html')]
        if method != 'POST':
            # Indicate an error because XML-RPC uses the POST method.
            code = 400
            message, explain = BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
            status = '%d %s' % (code, message)
            if method == 'HEAD':
                response = ''
            else:
                response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % {'code': code, 'message': message, 'explain': explain}
        else:
            # Dispatch the XML-RPC request to the registered implementation.
            status = '200 OK'
            request = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
            response = self._marshaled_dispatch(request)
        length = len(response)
        if length > 0:
            result = [response]
            headers.append(('Content-length', str(length)))
        start_response(status, headers)
        return result
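For what it's worth, a quick way to exercise a handler like this is to serve it with the standard library's wsgiref (a sketch only; the echo function, host, and port are placeholders):

from wsgiref.simple_server import make_server

def echo(message):
    # trivial example method exposed over XML-RPC
    return message

handler = WSGIXMLRPCRequestHandler(allow_none=True)
handler.register_function(echo)            # register_function comes from SimpleXMLRPCDispatcher
handler.register_introspection_functions()

# Serve the WSGI callable directly for testing; in production it would be
# mounted under Apache/mod_wsgi or another WSGI container.
httpd = make_server('localhost', 8000, handler)
httpd.serve_forever()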
Take a look at this. Hope it helps.
I'm a newbie to Python and Django and I can't figure out what I'm doing wrong here.
I have a Site object:
class Site(models.Model):
    domain = models.CharField(max_length=30)
    support_status = models.CharField(max_length=20, choices=SITE_SUPPORTED_STATUS, blank=False)
    requests = models.IntegerField()
    objects = SiteManager()

    def __unicode__(self):
        return u'%s %s' % (self.domain, self.support_status)
And a SiteManager object
class SiteManager(models.Manager):
    def supported_site_counts(self):
        i = self.filter(support_status__iexact="SUPPORTED").count()
        return i
From the console, the method supported_site_counts() works just fine:
(InteractiveConsole)
>>> from bookmark.models import Site, SiteManager
>>> Site.objects.supported_site_counts()
2012-05-18 18:09:20,027 DEBUG (0.001) SELECT COUNT(*) FROM "bookmark_site" WHERE
"bookmark_site"."support_status" LIKE SUPPORTED ESCAPE '\' ; args=(u'SUPPORTED',)
2012-05-18 18:09:20,028 DEBUG Got 1 supported site
1
But when it's called from a test case, the count comes back as 0:
import logging
import unittest

from bookmark.models import Site

class SiteManagerTest(unittest.TestCase):
    def test_supported_site_counts(self):
        self.x = False
        self.count = Site.objects.supported_site_counts()
        logging.debug(self.count)
This is probably because the test runner sets up a separate, empty database (distinct from your development database) to run the tests in. You will need to put test data into that test database, either programmatically or using fixtures.
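For example, a minimal sketch of the programmatic approach using Django's own test case class (the field values here are placeholders, not taken from your data):

from django.test import TestCase
from bookmark.models import Site

class SiteManagerTest(TestCase):
    def setUp(self):
        # Populate the empty test database before each test runs
        Site.objects.create(domain="example.com",
                            support_status="SUPPORTED",
                            requests=0)

    def test_supported_site_counts(self):
        self.assertEqual(Site.objects.supported_site_counts(), 1)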