add tinyusb

This commit is contained in:
Joey Castillo
2021-08-28 12:50:18 -04:00
parent c9e00b83bb
commit 39a5c822a2
1054 changed files with 188322 additions and 0 deletions

337
tinyusb/test/vendor/ceedling/bin/ceedling vendored Executable file
View File

@@ -0,0 +1,337 @@
#!/usr/bin/env ruby
#these are always used
require 'rubygems'
require 'fileutils'

# Check for the main project file (either the one defined in the ENV or the default)
# BUGFIX: File.exist? replaces File.exists?, which was deprecated and then
# removed in Ruby 3.2 (the old spelling raises NoMethodError there).
main_filepath = ENV['CEEDLING_MAIN_PROJECT_FILE']
project_found = (!main_filepath.nil? && File.exist?(main_filepath))
if (!project_found)
  # Fall back to the conventional project file in the current directory.
  main_filepath = "project.yml"
  project_found = File.exist?(main_filepath)
end
# True when the host OS looks like Windows (native mswin or MinGW build).
# Prefers the modern RbConfig constant; falls back to the ancient pre-1.9
# Config constant when RbConfig is somehow unavailable.
def is_windows?
  config = defined?(RbConfig) ? RbConfig::CONFIG : Config::CONFIG
  (config['host_os'] =~ /mswin|mingw/) ? true : false
end
# No project file was found: present the project-creation CLI instead of
# running build tasks. Thor is only required on this path.
unless (project_found)
#===================================== We Do Not Have A Project ================================================
puts "Welcome to Ceedling!"
require 'thor'
# Path to the ceedling installation root, i.e. one level above bin/.
def here
  File.join(File.dirname(__FILE__), "..")
end
# Thor command set shown when ceedling runs outside an existing project:
# creates or upgrades projects, instantiates bundled example projects, and
# reports the versions of the bundled tools.
class CeedlingTasks < Thor
  include Thor::Actions

  desc "new PROJECT_NAME", "create a new ceedling project"
  method_option :docs, :type => :boolean, :default => false, :desc => "Add docs in project vendor directory"
  method_option :local, :type => :boolean, :default => false, :desc => "Create a copy of Ceedling in the project vendor directory"
  method_option :gitignore, :type => :boolean, :default => false, :desc => "Create a gitignore file for ignoring ceedling generated files"
  method_option :no_configs, :type => :boolean, :default => false, :desc => "Don't install starter configuration files"
  method_option :noconfigs, :type => :boolean, :default => false
  #deprecated:
  method_option :no_docs, :type => :boolean, :default => false
  method_option :nodocs, :type => :boolean, :default => false
  method_option :as_gem, :type => :boolean, :default => false
  method_option :asgem, :type => :boolean, :default => false
  method_option :with_ignore, :type => :boolean, :default => false
  method_option :withignore, :type => :boolean, :default => false
  # Create a fresh project named +name+ with the directory skeleton and
  # whichever assets the options request.
  def new(name, silent = false)
    copy_assets_and_create_structure(name, silent, false, options)
  end

  desc "upgrade PROJECT_NAME", "upgrade ceedling for a project (not req'd if gem used)"
  method_option :docs, :type => :boolean, :default => false, :desc => "Add docs in project vendor directory"
  method_option :local, :type => :boolean, :default => false, :desc => "Create a copy of Ceedling in the project vendor directory"
  method_option :no_configs, :type => :boolean, :default => false, :desc => "Don't install starter configuration files"
  method_option :noconfigs, :type => :boolean, :default => false
  #deprecated:
  method_option :no_docs, :type => :boolean, :default => false
  method_option :nodocs, :type => :boolean, :default => false
  # Refresh an existing project's vendored assets in place (force-copies).
  def upgrade(name, silent = false)
    # NOTE(review): Thor's `options` is never nil, so the `|| {:upgrade => true}`
    # fallback never fires and :upgrade stays unset on this path — confirm
    # whether upgrades were meant to skip directory re-creation.
    copy_assets_and_create_structure(name, silent, true, options || {:upgrade => true})
  end

  no_commands do
    # Build (or upgrade) the project tree for +name+ and copy in the
    # requested assets: docs, a vendored copy of ceedling, starter config
    # files, and an optional .gitignore. +force+ overwrites existing files.
    def copy_assets_and_create_structure(name, silent=false, force=false, options = {})
      # Deprecation warnings. BUGFIX: the third check previously tested
      # options[:with_ignore] twice, so the :withignore spelling never
      # triggered it. Flag names in the messages now carry the double dash
      # Thor actually accepts.
      puts "WARNING: --no_docs deprecated. It is now the default. Specify --docs if you want docs installed." if (options[:no_docs] || options[:nodocs])
      puts "WARNING: --as_gem deprecated. It is now the default. Specify --local if you want ceedling installed to this project." if (options[:as_gem] || options[:asgem])
      puts "WARNING: --with_ignore deprecated. It is now called --gitignore" if (options[:with_ignore] || options[:withignore])

      use_docs    = options[:docs] || false
      use_configs = !(options[:no_configs] || options[:noconfigs] || false)
      use_gem     = !(options[:local])
      use_ignore  = options[:gitignore] || false
      is_upgrade  = options[:upgrade] || false

      ceedling_path     = File.join(name, 'vendor', 'ceedling')
      source_path       = File.join(name, 'src')
      test_path         = File.join(name, 'test')
      test_support_path = File.join(name, 'test/support')

      # If it's not an upgrade, make sure we have the paths we expect
      if (!is_upgrade)
        [source_path, test_path, test_support_path].each do |d|
          FileUtils.mkdir_p d
        end
      end

      # Generate gitkeep in test support path so the empty dir survives git
      FileUtils.touch(File.join(test_support_path, '.gitkeep'))

      # If documentation requested, create a place to dump them and do so
      if use_docs
        doc_path = File.join(ceedling_path, 'docs')
        FileUtils.mkdir_p doc_path

        in_doc_path = lambda {|f| File.join(doc_path, f)}

        doc_files = [
          'docs/CeedlingPacket.md',
          'vendor/c_exception/docs/CException.md',
          'vendor/cmock/docs/CMock_Summary.md',
          'vendor/unity/docs/UnityAssertionsCheatSheetSuitableforPrintingandPossiblyFraming.pdf',
          'vendor/unity/docs/UnityAssertionsReference.md',
          'vendor/unity/docs/UnityConfigurationGuide.md',
          'vendor/unity/docs/UnityGettingStartedGuide.md',
          'vendor/unity/docs/UnityHelperScriptsGuide.md',
          'vendor/unity/docs/ThrowTheSwitchCodingStandard.md',
        ]

        doc_files.each do |f|
          copy_file(f, in_doc_path.call(File.basename(f)), :force => force)
        end
      end

      # If installed locally to project, copy ceedling, unity, cmock, & supports to vendor
      unless use_gem
        FileUtils.mkdir_p ceedling_path

        #copy full folders from ceedling gem into project
        %w{plugins lib bin}.map do |f|
          {:src => f, :dst => File.join(ceedling_path, f)}
        end.each do |f|
          directory(f[:src], f[:dst], :force => force)
        end

        # mark ceedling as an executable
        File.chmod(0755, File.join(ceedling_path, 'bin', 'ceedling')) unless is_windows?

        #copy necessary subcomponents from ceedling gem into project
        sub_components = [
          {:src => 'vendor/c_exception/lib/', :dst => 'vendor/c_exception/lib'},
          {:src => 'vendor/cmock/config/',    :dst => 'vendor/cmock/config'},
          {:src => 'vendor/cmock/lib/',       :dst => 'vendor/cmock/lib'},
          {:src => 'vendor/cmock/src/',       :dst => 'vendor/cmock/src'},
          {:src => 'vendor/deep_merge/lib/',  :dst => 'vendor/deep_merge/lib'},
          {:src => 'vendor/diy/lib',          :dst => 'vendor/diy/lib'},
          {:src => 'vendor/unity/auto/',      :dst => 'vendor/unity/auto'},
          {:src => 'vendor/unity/src/',       :dst => 'vendor/unity/src'},
        ]

        sub_components.each do |c|
          directory(c[:src], File.join(ceedling_path, c[:dst]), :force => force)
        end
      end

      # We're copying in a configuration file if we haven't said not to
      if (use_configs)
        if use_gem
          copy_file(File.join('assets', 'project_as_gem.yml'), File.join(name, 'project.yml'), :force => force)
        else
          copy_file(File.join('assets', 'project_with_guts.yml'), File.join(name, 'project.yml'), :force => force)
          if is_windows?
            copy_file(File.join('assets', 'ceedling.cmd'), File.join(name, 'ceedling.cmd'), :force => force)
          else
            copy_file(File.join('assets', 'ceedling'), File.join(name, 'ceedling'), :force => force)
            File.chmod(0755, File.join(name, 'ceedling'))
          end
        end
      end

      # Copy the gitignore file if requested
      if (use_ignore)
        copy_file(File.join('assets', 'default_gitignore'), File.join(name, '.gitignore'), :force => force)
      end

      unless silent
        puts "\n"
        puts "Project '#{name}' #{force ? "upgraded" : "created"}!"
        puts " - Tool documentation is located in vendor/ceedling/docs" if use_docs
        puts " - Execute 'ceedling help' to view available test & build tasks"
        puts ''
      end
    end
  end

  desc "examples", "list available example projects"
  # Print the example projects bundled under <install root>/examples.
  def examples()
    puts "Available sample projects:"
    FileUtils.cd(File.join(here, "examples")) do
      Dir["*"].each {|proj| puts " #{proj}"}
    end
  end

  desc "example PROJ_NAME [DEST]", "new specified example project (in DEST, if specified)"
  # Instantiate the named example project into DEST (defaults to the
  # example's own name) with a local ceedling copy and docs included.
  def example(proj_name, dest=nil)
    if dest.nil? then dest = proj_name end

    copy_assets_and_create_structure(dest, true, false, {:local=>true, :docs=>true})

    dest_src     = File.join(dest,'src')
    dest_test    = File.join(dest,'test')
    dest_project = File.join(dest,'project.yml')

    directory "examples/#{proj_name}/src", dest_src
    directory "examples/#{proj_name}/test", dest_test
    # Replace the generic starter config with the example's own project file.
    remove_file dest_project
    copy_file "examples/#{proj_name}/project.yml", dest_project

    puts "\n"
    puts "Example project '#{proj_name}' created!"
    puts " - Tool documentation is located in vendor/ceedling/docs"
    puts " - Execute 'ceedling help' to view available test & build tasks"
    puts ''
  end

  desc "version", "return the version of the tools installed"
  # Report the versions of Ceedling and its bundled tools.
  def version()
    require 'ceedling/version.rb'
    puts " Ceedling:: #{Ceedling::Version::CEEDLING}"
    puts " CMock:: #{Ceedling::Version::CMOCK}"
    puts " Unity:: #{Ceedling::Version::UNITY}"
    puts " CException:: #{Ceedling::Version::CEXCEPTION}"
  end
end
# -T (task list) with no project present: print the project-creation tasks
# ourselves instead of letting Thor handle the flag.
if (ARGV[0] =~ /^\-T$/)
puts "\n(No Project Detected, Therefore Showing Options to Create Projects)"
CeedlingTasks.tasks.each_pair do |k,v|
puts v.usage.ljust(25,' ') + v.description
end
puts "\n"
else
# Normal Thor dispatch: source_root tells Thor::Actions where to find
# the assets/ and examples/ templates copied by the commands above.
CeedlingTasks.source_root here
CeedlingTasks.start
end
#===================================== We Have A Project Already ================================================
else
require 'yaml'
require 'rbconfig'

# Determine which platform we're on so we can pick the PATH separator and
# optional setup script below; default to :linux if detection blows up.
platform =
  begin
    host_os = RbConfig::CONFIG['host_os']
    if host_os =~ /mswin|mingw|cygwin/i
      :mswin
    elsif host_os =~ /darwin/
      :osx
    else
      :linux
    end
  rescue
    :linux
  end
#create our default meta-runner option set
options = {
  :pretest => nil,                 # optional shell setup script run before tasks
  :args => [],                     # task arguments passed through to rake
  :add_path => [],                 # extra PATH entries from the project file
  :path_connector => (platform == :mswin) ? ";" : ":",
  :graceful_fail => false,
  # Prefer a project-local vendored ceedling when present, else the gem.
  # BUGFIX: Dir.exist? replaces Dir.exists?, removed in Ruby 3.2.
  :which_ceedling => (Dir.exist?("vendor/ceedling") ? "vendor/ceedling" : 'gem'),
  :default_tasks => [ 'test:all' ],
  :list_tasks => false
}

# guess that we need a special script file first if it exists
# BUGFIX: File.exist? replaces File.exists?, removed in Ruby 3.2.
if (platform == :mswin)
  options[:pretest] = File.exist?("#{ platform.to_s }_setup.bat") ? "#{ platform.to_s }_setup.bat" : nil
else
  options[:pretest] = File.exist?("#{ platform.to_s }_setup.sh") ? "source #{ platform.to_s }_setup.sh" : nil
end
#merge in project settings if they can be found here
# NOTE(review): on Ruby 3.1+/Psych 4, YAML.load_file is safe_load-based —
# project files relying on aliases or custom tags may need unsafe_load_file;
# confirm against the project files ceedling ships.
yaml_options = YAML.load_file(main_filepath)
# :paths/:tools in the project file lists directories to prepend to PATH below
if (yaml_options[:paths])
options[:add_path] = yaml_options[:paths][:tools] || []
else
options[:add_path] = []
end
# Allow the project file to override failure handling, which ceedling copy
# to load, and the default task list.
options[:graceful_fail] = yaml_options[:graceful_fail] if yaml_options[:graceful_fail]
options[:which_ceedling] = yaml_options[:project][:which_ceedling] if (yaml_options[:project] && yaml_options[:project][:which_ceedling])
options[:default_tasks] = yaml_options[:default_tasks] if yaml_options[:default_tasks]
#sort through command line options
ARGV.each do |v|
case(v)
# Project-creation commands make no sense inside an existing project.
when /^(?:new|examples?|templates?)$/
puts "\nOops. You called ceedling with argument '#{v}'.\n" +
" This is an operation that will create a new project... \n" +
" but it looks like you're already in a project. If you really \n" +
" want to do this, try moving to an empty folder.\n\n"
abort
# Both 'help' and '-T' map to the rake task-list display below.
when /^help$/
options[:list_tasks] = true
when /^-T$/
options[:list_tasks] = true
# project:NAME selects an alternate user project file (NAME.yml).
when /^project:(\w+)/
ENV['CEEDLING_USER_PROJECT_FILE'] = "#{$1}.yml"
# Everything else is forwarded to rake as a task argument.
else
options[:args].push(v)
end
end
#add to the path
# Prepend each project-declared tool directory to PATH (deduplicated).
if (options[:add_path] && !options[:add_path].empty?)
path = ENV["PATH"]
options[:add_path].each do |p|
# NOTE(review): File.expand_path(File.dirname(__FILE__), p) expands the
# script's directory against base p — the arguments look reversed; confirm
# whether File.expand_path(p, File.dirname(__FILE__)) was intended.
f = File.expand_path(File.dirname(__FILE__),p)
path = (f + options[:path_connector] + path) unless path.include? f
end
ENV["PATH"] = path
end
# Load Ceedling (either through the rakefile OR directly)
# BUGFIX: File.exist? replaces File.exists?, removed in Ruby 3.2.
if (File.exist?("rakefile.rb"))
  load 'rakefile.rb'
else
  if (options[:which_ceedling] == 'gem')
    require 'ceedling'
  else
    load "#{options[:which_ceedling]}/lib/ceedling.rb"
  end
  Ceedling.load_project
end

# Hand control to rake, with rake's standard error reporting/exit handling.
Rake.application.standard_exception_handling do
  if options[:list_tasks]
    # Display helpful task list when requested. This required us to dig into Rake internals a bit
    Rake.application.define_singleton_method(:name=) {|n| @name = n}
    Rake.application.name = 'ceedling'
    Rake.application.options.show_tasks = :tasks
    # Hide internal build tasks from the listing.
    Rake.application.options.show_task_pattern = /^(?!.*build).*$/
    Rake.application.display_tasks_and_comments()
  else
    task :default => options[:default_tasks]
    # Run our Tasks!
    Rake.application.collect_command_line_tasks(options[:args])
    Rake.application.top_level
  end
end
true
#===================================================================================================================
end

View File

@@ -0,0 +1,292 @@
CException
==========
CException is a basic exception framework for C, suitable for use in
embedded applications. It provides an exception framework similar in
use to C++, but with much less overhead.
CException uses C standard library functions `setjmp` and `longjmp` to
operate. As long as the target system has these two functions defined,
this library should be useable with very little configuration. It
even supports environments where multiple program flows are in use,
such as real-time operating systems.
There are about a gabillion exception frameworks using a similar
setjmp/longjmp method out there... and there will probably be more
in the future. Unfortunately, when we started our last embedded
project, all those that existed either (a) did not support multiple
tasks (therefore multiple stacks) or (b) were way more complex than
we really wanted. CException was born.
*Why use CException?*
0. It's ANSI C, and it beats passing error codes around.
1. You want something simple... CException throws a single id. You can
define those ID's to be whatever you like. You might even choose which
type that number is for your project. But that's as far as it goes.
We weren't interested in passing objects or structs or strings...
just simple error codes.
2. Performance... CException can be configured for single tasking or
multitasking. In single tasking, there is very little overhead past
the setjmp/longjmp calls (which are already fast). In multitasking,
your only additional overhead is the time it takes you to determine
a unique task id 0 - num_tasks.
For the latest version, go to [ThrowTheSwitch.org](http://throwtheswitch.org)
CONTENTS OF THIS DOCUMENT
=========================
* Usage
* Limitations
* API
* Configuration
* Testing
* License
Usage
-----
Code that is to be protected are wrapped in `Try { } Catch { }` blocks.
The code directly following the Try call is "protected", meaning that
if any Throws occur, program control is directly transferred to the
start of the Catch block.
A numerical exception ID is included with Throw, and is made accessible
from the Catch block.
Throws can occur from within function calls (nested as deeply as you
like) or directly from within the function itself.
Limitations
-----------
This library was made to be as fast as possible, and provide basic
exception handling. It is not a full-blown exception library. Because
of this, there are a few limitations that should be observed in order
to successfully utilize this library:
1. Do not directly "return" from within a `Try` block, nor `goto`
into or out of a `Try` block.
*Why?*
The `Try` macro allocates some local memory and alters a global
pointer. These are cleaned up at the top of the `Catch` macro.
Gotos and returns would bypass some of these steps, resulting in
memory leaks or unpredictable behavior.
2. If (a) you change local (stack) variables within your `Try` block,
AND (b) wish to make use of the updated values after an exception
is thrown, those variables should be made `volatile`. Note that this
is ONLY for locals and ONLY when you need access to them after a
`Throw`.
*Why?*
Compilers optimize. There is no way to guarantee that the actual
memory location was updated and not just a register unless the
variable is marked volatile.
3. Memory which is `malloc`'d or `new`'d is not automatically released
when an error is thrown. This will sometimes be desirable, and
other times it may not. It will be the responsibility of the `Catch`
block to perform this kind of cleanup.
*Why?*
There's just no easy way to track `malloc`'d memory, etc., without
replacing or wrapping malloc calls or something like that. This
is a light framework, so these options were not desirable.
API
---
###Try
`Try` is a macro which starts a protected block. It MUST be followed by
a pair of braces or a single protected line (similar to an 'if'),
enclosing the data that is to be protected. It **must** be followed by a
`Catch` block (don't worry, you'll get compiler errors to let you know if
you mess any of that up).
###Catch(e)
`Catch` is a macro which ends the `Try` block and starts the error handling
block. The `Catch` block is called if and only if an exception was thrown
while within the `Try` block. This error was thrown by a `Throw` call
somewhere within `Try` (or within a function called within `Try`, or a function
called by a function called within `Try`, etc).
The single parameter `e` is filled with the error code which was thrown.
This can be used for reporting, conditional cleanup, etc. (or you can just
ignore it if you really want... people ignore return codes all the time,
right?). `e` should be of type `EXCEPTION_T`
###Throw(e)
This is the method of throwing an error. A `Throw` should only occur from within a
protected (`Try` ... `Catch`) block, though it may easily be nested many function
calls deep without an impact on performance or functionality. `Throw` takes
a single argument, which is an exception id which will be passed to `Catch`
as the reason for the error.
If you wish to rethrow an error, this can be done by calling `Throw(e)` with
the error code you just caught. It **is** valid to throw from a catch block.
###ExitTry()
On rare occasion, you might want to immediately exit your current `Try` block
but **not** treat this as an error. Don't run the `Catch`. Just start executing
from after the `Catch` as if nothing had happened... That's what `ExitTry` is
for.
CONFIGURATION
-------------
CException is a mostly portable library. It has one universal
dependency, and some macros which are required if working in a
multi-tasking environment.
1. The standard C library setjmp must be available. Since this is part
of the standard library, chances are good that you'll be fine.
2. If working in a multitasking environment, methods for obtaining an
index into an array of frames and to get the overall number of
id's are required. If the OS supports a method to retrieve Task
ID's, and those Tasks are numbered 0, 1, 2... you are in an ideal
situation. Otherwise, a more creative mapping function may be
required. Note that this function is likely to be called twice
for each protected block and once during a throw. This is the
only overhead in the system.
Exception.h
-----------
By convention, most projects include `Exception.h` which defines any
further requirements, then calls `CException.h` to do the gruntwork. All
of these are optional. You could directly include `CException.h` if
you wanted and just use the defaults provided.
* `EXCEPTION_T`
* Set this to the type you want your exception id's to be. Defaults to 'unsigned int'.
* `EXCEPTION_NONE`
* Set this to a number which will never be an exception id in your system. Defaults to `0x5a5a5a5a`.
* `EXCEPTION_GET_ID`
* If in a multi-tasking environment, this should be
set to be a call to the function described in #2 above.
Defaults to just return `0` all the time (good for
single tasking environments)
* `EXCEPTION_NUM_ID`
* If in a multi-tasking environment, this should be set
to the number of ID's required (usually the number of
tasks in the system). Defaults to `1` (for single
tasking environments).
* `CEXCEPTION_NO_CATCH_HANDLER(id)`
* This macro can be optionally specified.
It allows you to specify code to be called when a Throw
is made outside of `Try` ... `Catch` protection. Consider
this the emergency fallback plan for when something has
gone terribly wrong.
You may also want to include any header files which will commonly be
needed by the rest of your application where it uses exception handling
here. For example, OS header files or exception codes would be useful.
Finally, there are some hook macros which you can implement to inject
your own target-specific code in particular places. It is a rare instance
where you will need these, but they are here if you need them:
* `CEXCEPTION_HOOK_START_TRY`
* called immediately before the Try block
* `CEXCEPTION_HOOK_HAPPY_TRY`
* called immediately after the Try block if no exception was thrown
* `CEXCEPTION_HOOK_AFTER_TRY`
* called immediately after the Try block OR before an exception is caught
* `CEXCEPTION_HOOK_START_CATCH`
* called immediately before the catch
TESTING
-------
If you want to validate that CException works with your tools or that
it works with your custom configuration, you may want to run the test
suite.
The test suite included makes use of the `Unity` Test Framework. It will
require a native C compiler. The example makefile uses MinGW's gcc.
Modify the makefile to include the proper paths to tools, then run `make`
to compile and run the test application.
* `C_COMPILER`
* The C compiler to use to perform the tests
* `C_LIBS`
* The path to the C libraries (including setjmp)
* `UNITY_DIR`
* The path to the Unity framework (required to run tests)
(get it at [ThrowTheSwitch.org](http://throwtheswitch.org))
LICENSE
-------
This software is licensed under the MIT License
Copyright (c) 2007-2017 Mark VanderVoord
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,603 @@
CMock: A Summary
================
*[ThrowTheSwitch.org](http://throwtheswitch.org)*
*This documentation is released under a Creative Commons 3.0 Attribution Share-Alike License*
What Exactly Are We Talking About Here?
---------------------------------------
CMock is a nice little tool which takes your header files and creates
a Mock interface for it so that you can more easily unit test modules
that touch other modules. For each function prototype in your
header, like this one:
int DoesSomething(int a, int b);
...you get an automatically generated DoesSomething function
that you can link to instead of your real DoesSomething function.
By using this Mocked version, you can then verify that it receives
the data you want, and make it return whatever data you desire,
make it throw errors when you want, and more... Create these for
everything your latest real module touches, and you're suddenly
in a position of power: You can control and verify every detail
of your latest creation.
To make that easier, CMock also gives you a bunch of functions
like the ones below, so you can tell that generated DoesSomething
function how to behave for each test:
void DoesSomething_ExpectAndReturn(int a, int b, int toReturn);
void DoesSomething_ExpectAndThrow(int a, int b, EXCEPTION_T error);
void DoesSomething_StubWithCallback(CMOCK_DoesSomething_CALLBACK YourCallback);
void DoesSomething_IgnoreAndReturn(int toReturn);
You can pile a bunch of these back to back, and it remembers what
you wanted to pass when, like so:
test_CallsDoesSomething_ShouldDoJustThat(void)
{
DoesSomething_ExpectAndReturn(1,2,3);
DoesSomething_ExpectAndReturn(4,5,6);
DoesSomething_ExpectAndThrow(7,8, STATUS_ERROR_OOPS);
CallsDoesSomething( );
}
This test will call CallsDoesSomething, which is the function
we are testing. We are expecting that function to call DoesSomething
three times. The first time, we check to make sure it's called
as DoesSomething(1, 2) and we'll magically return a 3. The second
time we check for DoesSomething(4, 5) and we'll return a 6. The
third time we verify DoesSomething(7, 8) and we'll throw an error
instead of returning anything. If CallsDoesSomething gets
any of this wrong, it fails the test. It will fail if you didn't
call DoesSomething enough, or too much, or with the wrong arguments,
or in the wrong order.
CMock is based on Unity, which it uses for all internal testing.
It uses Ruby to do all the main work (versions 2.0.0 and above).
Installing
==========
The first thing you need to do to install CMock is to get yourself
a copy of Ruby. If you're on linux or osx, you probably already
have it. You can prove it by typing the following:
ruby --version
If it replied in a way that implies ignorance, then you're going to
need to install it. You can go to [ruby-lang](https://ruby-lang.org)
to get the latest version. You're also going to need to do that if it
replied with a version that is older than 2.0.0. Go ahead. We'll wait.
Once you have Ruby, you have three options:
* Clone the latest [CMock repo on github](https://github.com/ThrowTheSwitch/CMock/)
* Download the latest [CMock zip from github](https://github.com/ThrowTheSwitch/CMock/)
* Install Ceedling (which has it built in!) through your commandline using `gem install ceedling`.
Generated Mock Module Summary
=============================
In addition to the mocks themselves, CMock will generate the
following functions for use in your tests. The expect functions
are always generated. The other functions are only generated
if those plugins are enabled:
Expect:
-------
Your basic staple Expects which will be used for most of your day
to day CMock work. By calling this, you are telling CMock that you
expect that function to be called during your test. It also specifies
which arguments you expect it to be called with, and what return
value you want returned when that happens. You can call this function
multiple times back to back in order to queue up multiple calls.
* `void func(void)` => `void func_Expect(void)`
* `void func(params)` => `void func_Expect(expected_params)`
* `retval func(void)` => `void func_ExpectAndReturn(retval_to_return)`
* `retval func(params)` => `void func_ExpectAndReturn(expected_params, retval_to_return)`
ExpectAnyArgs:
--------------
This behaves just like the Expects calls, except that it doesn't really
care what the arguments are that the mock gets called with. It still counts
the number of times the mock is called and it still handles return values
if there are some.
* `void func(void)` => `void func_ExpectAnyArgs(void)`
* `void func(params)` => `void func_ExpectAnyArgs(void)`
* `retval func(void)` => `void func_ExpectAnyArgsAndReturn(retval_to_return)`
* `retval func(params)` => `void func_ExpectAnyArgsAndReturn(retval_to_return)`
Array:
------
An ExpectWithArray is another variant of Expect. Like expect, it cares about
the number of times a mock is called, the arguments it is called with, and the
values it is to return. This variant has another feature, though. For anything
that resembles a pointer or array, it breaks the argument into TWO arguments.
The first is the original pointer. The second specify the number of elements
it is to verify of that array. If you specify 1, it'll check one object. If 2,
it'll assume your pointer is pointing at the first of two elements in an array.
If you specify zero elements, it will check just the pointer if
`:smart` mode is configured or fail if `:compare_data` is set.
* `void func(void)` => (nothing. In fact, an additional function is only generated if the params list contains pointers)
* `void func(ptr * param, other)` => `void func_ExpectWithArray(ptr* param, int param_depth, other)`
* `retval func(void)` => (nothing. In fact, an additional function is only generated if the params list contains pointers)
* `retval func(other, ptr* param)` => `void func_ExpectWithArrayAndReturn(other, ptr* param, int param_depth, retval_to_return)`
Ignore:
-------
Maybe you don't care about the number of times a particular function is called or
the actual arguments it is called with. In that case, you want to use Ignore. Ignore
only needs to be called once per test. It will then ignore any further calls to that
particular mock. The IgnoreAndReturn works similarly, except that it has the added
benefit of knowing what to return when that call happens. If the mock is called more
times than IgnoreAndReturn was called, it will keep returning the last value without
complaint. If it's called less times, it will also ignore that. You SAID you didn't
care how many times it was called, right?
* `void func(void)` => `void func_Ignore(void)`
* `void func(params)` => `void func_Ignore(void)`
* `retval func(void)` => `void func_IgnoreAndReturn(retval_to_return)`
* `retval func(params)` => `void func_IgnoreAndReturn(retval_to_return)`
Ignore Arg:
------------
Maybe you overall want to use Expect and its similar variations, but you don't care
what is passed to a particular argument. This is particularly useful when that argument
is a pointer to a value that is supposed to be filled in by the function. You don't want
to use ExpectAnyArgs, because you still care about the other arguments. Instead, before
any of your Expect calls are made, you can call this function. It tells CMock to ignore
a particular argument for the rest of this test, for this mock function.
* `void func(params)` => `void func_IgnoreArg_paramName(void)`
ReturnThruPtr:
--------------
Another option which operates on a particular argument of a function is the ReturnThruPtr
plugin. For every argument that resembles a pointer or reference, CMock generates an
instance of this function. Just as the AndReturn functions support injecting one or more
return values into a queue, this function lets you specify one or more return values which
are queued up and copied into the space being pointed at each time the mock is called.
* `void func(param1)` => `void func_ReturnThruPtr_paramName(val_to_return)`
* => `void func_ReturnArrayThruPtr_paramName(val_to_return, len)`
* => `void func_ReturnMemThruPtr_paramName(val_to_return, size)`
Callback:
---------
If all those other options don't work, and you really need to do something custom, you
still have a choice. As soon as you stub a callback in a test, it will call the callback
whenever the mock is encountered and return the retval returned from the callback (if any)
instead of performing the usual expect checks. It can be configured to check the arguments
first (like expects) or just jump directly to the callback.
* `void func(void)` => `void func_StubWithCallback(CMOCK_func_CALLBACK callback)`
where `CMOCK_func_CALLBACK` looks like: `void func(int NumCalls)`
* `void func(params)` => `void func_StubWithCallback(CMOCK_func_CALLBACK callback)`
where `CMOCK_func_CALLBACK` looks like: `void func(params, int NumCalls)`
* `retval func(void)` => `void func_StubWithCallback(CMOCK_func_CALLBACK callback)`
where `CMOCK_func_CALLBACK` looks like: `retval func(int NumCalls)`
* `retval func(params)` => `void func_StubWithCallback(CMOCK_func_CALLBACK callback)`
where `CMOCK_func_CALLBACK` looks like: `retval func(params, int NumCalls)`
Cexception:
-----------
Finally, if you are using Cexception for error handling, you can use this to throw errors
from inside mocks. Like Expects, it remembers which call was supposed to throw the error,
and it still checks parameters first.
* `void func(void)` => `void func_ExpectAndThrow(value_to_throw)`
* `void func(params)` => `void func_ExpectAndThrow(expected_params, value_to_throw)`
* `retval func(void)` => `void func_ExpectAndThrow(value_to_throw)`
* `retval func(params)` => `void func_ExpectAndThrow(expected_params, value_to_throw)`
Running CMock
=============
CMock is a Ruby script and class. You can therefore use it directly
from the command line, or include it in your own scripts or rakefiles.
Mocking from the Command Line
-----------------------------
After unpacking CMock, you will find cmock.rb in the 'lib' directory.
This is the file that you want to run. It takes a list of header files
to be mocked, as well as an optional yaml file for a more detailed
configuration (see config options below).
For example, this will create three mocks using the configuration
specified in MyConfig.yml:
ruby cmock.rb -oMyConfig.yml super.h duper.h awesome.h
And this will create two mocks using the default configuration:
ruby cmock.rb ../mocking/stuff/is/fun.h ../try/it/yourself.h
Mocking From Scripts or Rake
----------------------------
CMock can be used directly from your own scripts or from a rakefile.
Start by including cmock.rb, then create an instance of CMock.
When you create your instance, you may initialize it in one of
three ways.
You may specify nothing, allowing it to run with default settings:
require 'cmock.rb'
cmock = CMock.new
You may specify a YAML file containing the configuration options
you desire:
cmock = CMock.new('../MyConfig.yml')
You may specify the options explicitly:
cmock = CMock.new(:plugins => [:cexception, :ignore], :mock_path => 'my/mocks/')
Config Options:
---------------
The following configuration options can be specified in the
yaml file or directly when instantiating.
Passed as Ruby, they look like this:
{ :attributes => ["__funky", "__intrinsic"], :when_ptr => :compare }
Defined in the yaml file, they look more like this:
:cmock:
:attributes:
- __funky
- __intrinsic
:when_ptr: :compare
In all cases, you can just include the things that you want to override
from the defaults. We've tried to specify what the defaults are below.
* `:attributes`:
These are attributes that CMock should ignore for you for testing
purposes. Custom compiler extensions and externs are handy things to
put here. If your compiler is choking on some extended syntax, this
is often a good place to look.
* defaults: ['__ramfunc', '__irq', '__fiq', 'register', 'extern']
* **note:** this option will reinsert these attributes onto the mock's calls.
If that isn't what you are looking for, check out :strippables.
* `:c_calling_conventions`:
Similarly, CMock may need to understand which C calling conventions
might show up in your codebase. If it encounters something it doesn't
recognize, it's not going to mock it. We have the most common covered,
but there are many compilers out there, and therefore many other options.
* defaults: ['__stdcall', '__cdecl', '__fastcall']
* **note:** this option will reinsert these attributes onto the mock's calls.
If that isn't what you are looking for, check out :strippables.
* `:callback_after_arg_check`:
Tell `:callback` plugin to do the normal argument checking **before** it
calls the callback function by setting this to true. When false, the
callback function is called **instead** of the argument verification.
* default: false
* `:callback_include_count`:
Tell `:callback` plugin to include an extra parameter to specify the
number of times the callback has been called. If set to false, the
callback has the same interface as the mocked function. This can be
handy when you're wanting to use callback as a stub.
* default: true
* `:cexception_include`:
Tell `:cexception` plugin where to find CException.h... You only need to
define this if it's not in your build path already... which it usually
will be for the purpose of your builds.
* default: *nil*
* `:enforce_strict_ordering`:
CMock always enforces the order that you call a particular function,
so if you expect GrabNabber(int size) to be called three times, it
will verify that the sizes are in the order you specified. You might
*also* want to make sure that all different functions are called in a
particular order. If so, set this to true.
* default: false
* `:framework`:
Currently the only option is `:unity`. Eventually if we support other
unity test frameworks (or if you write one for us), they'll get added
here.
* default: :unity
* `:includes`:
An array of additional include files which should be added to the
mocks. Useful for global types and definitions used in your project.
There are more specific versions if you care WHERE in the mock files
the includes get placed. You can define any or all of these options.
* `:includes`
* `:includes_h_pre_orig_header`
* `:includes_h_post_orig_header`
* `:includes_c_pre_header`
* `:includes_c_post_header`
* default: nil #for all 5 options
* `:memcmp_if_unknown`:
C developers create a lot of types, either through typedef or preprocessor
macros. CMock isn't going to automatically know what you were thinking all
the time (though it tries its best). If it comes across a type it doesn't
recognize, you have a choice on how you want it to handle it. It can either
perform a raw memory comparison and report any differences, or it can fail
with a meaningful message. Either way, this feature will only happen after
all other mechanisms have failed (The thing encountered isn't a standard
type. It isn't in the :treat_as list. It isn't in a custom unity_helper).
* default: true
* `:mock_path`:
The directory where you would like the mock files generated to be
placed.
* default: mocks
* `:mock_prefix`:
The prefix to prepend to your mock files. For example, if it's “Mock”, a file
“USART.h” will get a mock called “MockUSART.c”. This CAN be used with a suffix
at the same time.
* default: Mock
* `:mock_suffix`:
The suffix to append to your mock files. For example, if it's "_Mock", a file
"USART.h" will get a mock called "USART_Mock.h". This CAN be used with a prefix
at the same time.
* default: ""
* `:plugins`:
An array of which plugins to enable. ':expect' is always active. Also
available currently:
* `:ignore`
* `:ignore_arg`
* `:expect_any_args`
* `:array`
* `:cexception`
* `:callback`
* `:return_thru_ptr`
* `:strippables`:
An array containing a list of items to remove from the header
before deciding what should be mocked. This can be something simple
like a compiler extension CMock wouldn't recognize, or could be a
regex to reject certain function name patterns. This is a great way to
get rid of compiler extensions when your test compiler doesn't support
them. For example, use `:strippables: ['(?:functionName\s*\(+.*?\)+)']`
to prevent a function `functionName` from being mocked. By default, it
is ignoring all gcc attribute extensions.
* default: ['(?:__attribute__\s*\(+.*?\)+)']
* `:subdir`:
This is a relative subdirectory for your mocks. Set this to e.g. "sys" in
order to create a mock for `sys/types.h` in `(:mock_path)/sys/`.
* default: ""
* `:treat_as`:
The `:treat_as` list is a shortcut for when you have created typedefs
of standard types. Why create a custom unity helper for UINT16 when
the unity function TEST_ASSERT_EQUAL_HEX16 will work just perfectly?
Just add 'UINT16' => 'HEX16' to your list (actually, don't. We already
did that one for you). Maybe you have a type that is a pointer to an
array of unsigned characters? No problem, just add 'UINT8_T*' =>
'HEX8*'
* NOTE: unlike the other options, your specifications MERGE with the
default list. Therefore, if you want to override something, you must
reassign it to something else (or to *nil* if you don't want it)
* default:
* 'int': 'INT'
* 'char': 'INT8'
* 'short': 'INT16'
* 'long': 'INT'
* 'int8': 'INT8'
* 'int16': 'INT16'
* 'int32': 'INT'
* 'int8_t': 'INT8'
* 'int16_t': 'INT16'
* 'int32_t': 'INT'
* 'INT8_T': 'INT8'
* 'INT16_T': 'INT16'
* 'INT32_T': 'INT'
* 'bool': 'INT'
* 'bool_t': 'INT'
* 'BOOL': 'INT'
* 'BOOL_T': 'INT'
* 'unsigned int': 'HEX32'
* 'unsigned long': 'HEX32'
* 'uint32': 'HEX32'
* 'uint32_t': 'HEX32'
* 'UINT32': 'HEX32'
* 'UINT32_T': 'HEX32'
* 'void*': 'HEX8_ARRAY'
* 'unsigned short': 'HEX16'
* 'uint16': 'HEX16'
* 'uint16_t': 'HEX16'
* 'UINT16': 'HEX16'
* 'UINT16_T': 'HEX16'
* 'unsigned char': 'HEX8'
* 'uint8': 'HEX8'
* 'uint8_t': 'HEX8'
* 'UINT8': 'HEX8'
* 'UINT8_T': 'HEX8'
* 'char*': 'STRING'
* 'pCHAR': 'STRING'
* 'cstring': 'STRING'
* 'CSTRING': 'STRING'
* 'float': 'FLOAT'
* 'double': 'FLOAT'
* `:treat_as_void`:
We've seen "fun" legacy systems typedef 'void' with a custom type,
like MY_VOID. Add any instances of those to this list to help CMock
understand how to deal with your code.
* default: []
* `:treat_externs`:
This specifies how you want CMock to handle functions that have been
marked as extern in the header file. Should it mock them?
* `:include` will mock externed functions
* `:exclude` will ignore externed functions (default).
* `:unity_helper_path`:
If you have created a header with your own extensions to unity to
handle your own types, you can set this argument to that path. CMock
will then automagically pull in your helpers and use them. The only
trick is that you make sure you follow the naming convention:
`UNITY_TEST_ASSERT_EQUAL_YourType`. If it finds macros of the right
shape that match that pattern, it'll use them.
* default: []
* `:verbosity`:
How loud should CMock be?
* 0 for errors only
* 1 for errors and warnings
* 2 for normal (default)
* 3 for verbose
* `:weak`:
When set this to some value, the generated mocks are defined as weak
symbols using the configured format. This allows them to be overridden
in particular tests.
* Set to '__attribute__ ((weak))' for weak mocks when using GCC.
* Set to any non-empty string for weak mocks when using IAR.
* default: ""
* `:when_no_prototypes`:
When you give CMock a header file and ask it to create a mock out of
it, it usually contains function prototypes (otherwise what was the
point?). You can control what happens when this isn't true. You can
set this to `:warn`, `:ignore`, or `:error`
* default: :warn
* `:when_ptr`:
You can customize how CMock deals with pointers (c strings result in
string comparisons... we're talking about **other** pointers here). Your
options are `:compare_ptr` to just verify the pointers are the same,
`:compare_data` or `:smart` to verify that the data is the same.
`:compare_data` and `:smart` behaviors will change slightly based on
if you have the array plugin enabled. By default, they compare a
single element of what is being pointed to. So if you have a pointer
to a struct called ORGAN_T, it will compare one ORGAN_T (whatever that
is).
* default: :smart
* `:fail_on_unexpected_calls`:
By default, CMock will fail a test if a mock is called without _Expect and _Ignore
called first. While this forces test writers to be more explicit in their expectations,
it can clutter tests with _Expect or _Ignore calls for functions which are not the focus
of the test. While this is a good indicator that this module should be refactored, some
users are not fans of the additional noise.
Therefore, :fail_on_unexpected_calls can be set to false to force all mocks to start with
the assumption that they are operating as _Ignore unless otherwise specified.
* default: true
* **note:**
If this option is disabled, the mocked functions will return
a default value (0) when called (and only if they have to return something of course).
Compiled Options:
-----------------
A number of #defines also exist for customizing the cmock experience.
Feel free to pass these into your compiler or whatever is most
convenient. CMock will otherwise do its best to guess what you want
based on other settings, particularly Unity's settings.
* `CMOCK_MEM_STATIC` or `CMOCK_MEM_DYNAMIC`
Define one of these to determine if you want to dynamically add
memory during tests as required from the heap. If static, you
can control the total footprint of Cmock. If dynamic, you will
need to make sure you make some heap space available for Cmock.
* `CMOCK_MEM_SIZE`
In static mode this is the total amount of memory you are allocating
to Cmock. In Dynamic mode this is the size of each chunk allocated
at once (larger numbers grab more memory but require less mallocs).
* `CMOCK_MEM_ALIGN`
The way to align your data to. Not everything is as flexible as
a PC, as most embedded designers know. This defaults to 2, meaning
align to the closest 2^2 -> 4 bytes (32 bits). You can turn off alignment
by setting 0, force alignment to the closest uint16 with 1 or even
to the closest uint64 with 3.
* `CMOCK_MEM_PTR_AS_INT`
This is used internally to hold pointers... it needs to be big
enough. On most processors a pointer is the same as an unsigned
long... but maybe that's not true for yours?
* `CMOCK_MEM_INDEX_TYPE`
This needs to be something big enough to point anywhere in Cmock's
memory space... usually it's an unsigned int.
Examples
========
You can look in the [examples directory](/examples/) for a couple of examples on how
you might tool CMock into your build process. You may also want to consider
using [Ceedling](https://throwtheswitch.org/ceedling). Please note that
these examples are meant to show how the build process works. They have
failing tests ON PURPOSE to show what that would look like. Don't be alarmed. ;)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,206 @@
# ThrowTheSwitch.org Coding Standard
Hi. Welcome to the coding standard for ThrowTheSwitch.org. For the most part,
we try to follow these standards to unify our contributors' code into a cohesive
unit (puns intended). You might find places where these standards aren't
followed. We're not perfect. Please be polite where you notice these discrepancies
and we'll try to be polite when we notice yours.
;)
## Why Have A Coding Standard?
Being consistent makes code easier to understand. We've tried to keep
our standard simple because we also believe that we can only expect someone to
follow something that is understandable. Please do your best.
## Our Philosophy
Before we get into details on syntax, let's take a moment to talk about our
vision for these tools. We're C developers and embedded software developers.
These tools are great to test any C code, but catering to embedded software has
made us more tolerant of compiler quirks. There are a LOT of quirky compilers
out there. By quirky I mean "doesn't follow standards because they feel like
they have a license to do as they wish."
Our philosophy is "support every compiler we can". Most often, this means that
we aim for writing C code that is standards compliant (often C89... that seems
to be a sweet spot that is almost always compatible). But it also means these
tools are tolerant of things that aren't common. Some that aren't even
compliant. There are configuration options to override the size of standard
types. There are configuration options to force Unity to not use certain
standard library functions. A lot of Unity is configurable and we have worked
hard to make it not TOO ugly in the process.
Similarly, our tools that parse C do their best. They aren't full C parsers
(yet) and, even if they were, they would still have to accept non-standard
additions like gcc extensions or specifying `@0x1000` to force a variable to
compile to a particular location. It's just what we do, because we like
everything to Just Work™.
Speaking of having things Just Work™, that's our second philosophy. By that, we
mean that we do our best to have EVERY configuration option have a logical
default. We believe that if you're working with a simple compiler and target,
you shouldn't need to configure very much... we try to make the tools guess as
much as they can, but give the user the power to override it when it's wrong.
## Naming Things
Let's talk about naming things. Programming is all about naming things. We name
files, functions, variables, and so much more. While we're not always going to
find the best name for something, we actually put a bit of effort into
finding *What Something WANTS to be Called*™.
When naming things, we follow this hierarchy, the first being the
most important to us (but we do all four when possible):
1. Readable
2. Descriptive
3. Consistent
4. Memorable
#### Readable
We want to read our code. This means we like names and flow that are more
naturally read. We try to avoid double negatives. We try to avoid cryptic
abbreviations (sticking to ones we feel are common).
#### Descriptive
We like descriptive names for things, especially functions and variables.
Finding the right name for something is an important endeavor. You might notice
from poking around our code that this often results in names that are a little
longer than the average. Guilty. We're okay with a bit more typing if it
means our code is easier to understand.
There are two exceptions to this rule that we also stick to as religiously as
possible:
First, while we realize hungarian notation (and similar systems for encoding
type information into variable names) is providing a more descriptive name, we
feel that (for the average developer) it takes away from readability and is to be avoided.
Second, loop counters and other local throw-away variables often have a purpose
which is obvious. There's no need, therefore, to get carried away with complex
naming. We find i, j, and k are better loop counters than loopCounterVar or
whatnot. We only break this rule when we see that more description could improve
understanding of an algorithm.
#### Consistent
We like consistency, but we're not really obsessed with it. We try to name our
configuration macros in a consistent fashion... you'll notice a repeated use of
UNITY_EXCLUDE_BLAH or UNITY_USES_BLAH macros. This helps users avoid having to
remember each macro's details.
#### Memorable
Where ever it doesn't violate the above principles, we try to apply memorable
names. Sometimes this means using something that is simply descriptive, but
often we strive for descriptive AND unique... we like quirky names that stand
out in our memory and are easier to search for. Take a look through the file
names in Ceedling and you'll get a good idea of what we are talking about here.
Why use preprocess when you can use preprocessinator? Or what better describes a
module in charge of invoking tasks during releases than release_invoker? Don't
get carried away. The names are still descriptive and fulfill the above
requirements, but they don't feel stale.
## C and C++ Details
We don't really want to add to the style battles out there. Tabs or spaces?
How many spaces? Where do the braces go? These are age-old questions that will
never be answered... or at least not answered in a way that will make everyone
happy.
We've decided on our own style preferences. If you'd like to contribute to these
projects (and we hope that you do), then we ask if you do your best to follow
the same. It will only hurt a little. We promise.
#### Whitespace
Our C-style is to use spaces and to use 4 of them per indent level. It's a nice
power-of-2 number that looks decent on a wide-screen. We have no more reason
than that. We break that rule when we have lines that wrap (macros or function
arguments or whatnot). When that happens, we like to indent further to line
things up in nice tidy columns.
```C
if (stuff_happened)
{
do_something();
}
```
#### Case
- Files - all lower case with underscores.
- Variables - all lower case with underscores
- Macros - all caps with underscores.
- Typedefs - all caps with underscores. (also ends with _T).
- Functions - camel cased. Usually named ModuleName_FuncName
- Constants and Globals - camel cased.
#### Braces
The left brace is on the next line after the declaration. The right brace is
directly below that. Everything in between in indented one level. If you're
catching an error and you have a one-liner, go ahead and do it on the same line.
```C
while (blah)
{
//Like so. Even if only one line, we use braces.
}
```
#### Comments
Do you know what we hate? Old-school C block comments. BUT, we're using them
anyway. As we mentioned, our goal is to support every compiler we can,
especially embedded compilers. There are STILL C compilers out there that only
support old-school block comments. So that is what we're using. We apologize. We
think they are ugly too.
## Ruby Details
Is there really such thing as a Ruby coding standard? Ruby is such a free form
language, it seems almost sacrilegious to suggest that people should comply to
one method! We'll keep it really brief!
#### Whitespace
Our Ruby style is to use spaces and to use 2 of them per indent level. It's a
nice power-of-2 number that really grooves with Ruby's compact style. We have no
more reason than that. We break that rule when we have lines that wrap. When
that happens, we like to indent further to line things up in nice tidy columns.
#### Case
- Files - all lower case with underscores.
- Variables - all lower case with underscores
- Classes, Modules, etc - Camel cased.
- Functions - all lower case with underscores
- Constants - all upper case with underscores
## Documentation
Egad. Really? We use mark down and we like pdf files because they can be made to
look nice while still being portable. Good enough?
*Find The Latest of This And More at [ThrowTheSwitch.org](https://throwtheswitch.org)*

View File

@@ -0,0 +1,779 @@
# Unity Assertions Reference
## Background and Overview
### Super Condensed Version
- An assertion establishes truth (i.e. boolean True) for a single condition.
Upon boolean False, an assertion stops execution and reports the failure.
- Unity is mainly a rich collection of assertions and the support to gather up
and easily execute those assertions.
- The structure of Unity allows you to easily separate test assertions from
source code in, well, test code.
- Unity's assertions:
- Come in many, many flavors to handle different C types and assertion cases.
- Use context to provide detailed and helpful failure messages.
- Document types, expected values, and basic behavior in your source code for
free.
### Unity Is Several Things But Mainly It's Assertions
One way to think of Unity is simply as a rich collection of assertions you can
use to establish whether your source code behaves the way you think it does.
Unity provides a framework to easily organize and execute those assertions in
test code separate from your source code.
### What's an Assertion?
At their core, assertions are an establishment of truth - boolean truth. Was this
thing equal to that thing? Does that code doohickey have such-and-such property
or not? You get the idea. Assertions are executable code (to appreciate the big
picture on this read up on the difference between
[link:Dynamic Verification and Static Analysis]). A failing assertion stops
execution and reports an error through some appropriate I/O channel (e.g.
stdout, GUI, file, blinky light).
Fundamentally, for dynamic verification all you need is a single assertion
mechanism. In fact, that's what the [assert() macro in C's standard library](https://en.wikipedia.org/wiki/Assert.h)
is for. So why not just use it? Well, we can do far better in the reporting
department. C's `assert()` is pretty dumb as-is and is particularly poor for
handling common data types like arrays, structs, etc. And, without some other
support, it's far too tempting to litter source code with C's `assert()`'s. It's
generally much cleaner, manageable, and more useful to separate test and source
code in the way Unity facilitates.
### Unity's Assertions: Helpful Messages _and_ Free Source Code Documentation
Asserting a simple truth condition is valuable, but using the context of the
assertion is even more valuable. For instance, if you know you're comparing bit
flags and not just integers, then why not use that context to give explicit,
readable, bit-level feedback when an assertion fails?
That's what Unity's collection of assertions do - capture context to give you
helpful, meaningful assertion failure messages. In fact, the assertions
themselves also serve as executable documentation about types and values in your
source code. So long as your tests remain current with your source and all those
tests pass, you have a detailed, up-to-date view of the intent and mechanisms in
your source code. And due to a wondrous mystery, well-tested code usually tends
to be well designed code.
## Assertion Conventions and Configurations
### Naming and Parameter Conventions
The convention of assertion parameters generally follows this order:
TEST_ASSERT_X( {modifiers}, {expected}, actual, {size/count} )
The very simplest assertion possible uses only a single "actual" parameter (e.g.
a simple null check).
"Actual" is the value being tested and unlike the other parameters in an
assertion construction is the only parameter present in all assertion variants.
"Modifiers" are masks, ranges, bit flag specifiers, floating point deltas.
"Expected" is your expected value (duh) to compare to an "actual" value; it's
marked as an optional parameter because some assertions only need a single
"actual" parameter (e.g. null check).
"Size/count" refers to string lengths, number of array elements, etc.
Many of Unity's assertions are clear duplications in that the same data type
is handled by several assertions. The differences among these are in how failure
messages are presented. For instance, a `_HEX` variant of an assertion prints
the expected and actual values of that assertion formatted as hexadecimal.
#### TEST_ASSERT_X_MESSAGE Variants
_All_ assertions are complemented with a variant that includes a simple string
message as a final parameter. The string you specify is appended to an assertion
failure message in Unity output.
For brevity, the assertion variants with a message parameter are not listed
below. Just tack on `_MESSAGE` as the final component to any assertion name in
the reference list below and add a string as the final parameter.
_Example:_
TEST_ASSERT_X( {modifiers}, {expected}, actual, {size/count} )
becomes messageified like thus...
TEST_ASSERT_X_MESSAGE( {modifiers}, {expected}, actual, {size/count}, message )
Notes:
- The `_MESSAGE` variants intentionally do not support `printf` style formatting
since many embedded projects don't support or avoid `printf` for various reasons.
It is possible to use `sprintf` before the assertion to assemble a complex fail
message, if necessary.
- If you want to output a counter value within an assertion fail message (e.g. from
a loop) , building up an array of results and then using one of the `_ARRAY`
assertions (see below) might be a handy alternative to `sprintf`.
#### TEST_ASSERT_X_ARRAY Variants
Unity provides a collection of assertions for arrays containing a variety of
types. These are documented in the Array section below. These are almost on par
with the `_MESSAGE` variants of Unity's Asserts in that for pretty much any Unity
type assertion you can tack on `_ARRAY` and run assertions on an entire block of
memory.
TEST_ASSERT_EQUAL_TYPEX_ARRAY( expected, actual, {size/count} )
"Expected" is an array itself.
"Size/count" is one or two parameters necessary to establish the number of array
elements and perhaps the length of elements within the array.
Notes:
- The `_MESSAGE` variant convention still applies here to array assertions. The
`_MESSAGE` variants of the `_ARRAY` assertions have names ending with
`_ARRAY_MESSAGE`.
- Assertions for handling arrays of floating point values are grouped with float
and double assertions (see immediately following section).
### TEST_ASSERT_EACH_EQUAL_X Variants
Unity provides a collection of assertions for arrays containing a variety of
types which can be compared to a single value as well. These are documented in
the Each Equal section below. These are almost on par with the `_MESSAGE`
variants of Unity's Asserts in that for pretty much any Unity type assertion you
can inject _EACH_EQUAL and run assertions on an entire block of memory.
TEST_ASSERT_EACH_EQUAL_TYPEX( expected, actual, {size/count} )
"Expected" is a single value to compare to.
"Actual" is an array where each element will be compared to the expected value.
"Size/count" is one or two parameters necessary to establish the number of array
elements and perhaps the length of elements within the array.
Notes:
- The `_MESSAGE` variant convention still applies here to Each Equal assertions.
- Assertions for handling Each Equal of floating point values are grouped with
float and double assertions (see immediately following section).
### Configuration
#### Floating Point Support Is Optional
Support for floating point types is configurable. That is, by defining the
appropriate preprocessor symbols, floats and doubles can be individually enabled
or disabled in Unity code. This is useful for embedded targets with no floating
point math support (i.e. Unity compiles free of errors for fixed point only
platforms). See Unity documentation for specifics.
#### Maximum Data Type Width Is Configurable
Not all targets support 64 bit wide types or even 32 bit wide types. Define the
appropriate preprocessor symbols and Unity will omit all operations from
compilation that exceed the maximum width of your target. See Unity
documentation for specifics.
## The Assertions in All Their Blessed Glory
### Basic Fail and Ignore
##### `TEST_FAIL()`
This fella is most often used in special conditions where your test code is
performing logic beyond a simple assertion. That is, in practice, `TEST_FAIL()`
will always be found inside a conditional code block.
_Examples:_
- Executing a state machine multiple times that increments a counter your test
code then verifies as a final step.
- Triggering an exception and verifying it (as in Try / Catch / Throw - see the
[CException](https://github.com/ThrowTheSwitch/CException) project).
##### `TEST_IGNORE()`
Marks a test case (i.e. function meant to contain test assertions) as ignored.
Usually this is employed as a breadcrumb to come back and implement a test case.
An ignored test case has effects if other assertions are in the enclosing test
case (see Unity documentation for more).
### Boolean
##### `TEST_ASSERT (condition)`
##### `TEST_ASSERT_TRUE (condition)`
##### `TEST_ASSERT_FALSE (condition)`
##### `TEST_ASSERT_UNLESS (condition)`
A simple wording variation on `TEST_ASSERT_FALSE`. The semantics of
`TEST_ASSERT_UNLESS` aid readability in certain test constructions or
conditional statements.
##### `TEST_ASSERT_NULL (pointer)`
##### `TEST_ASSERT_NOT_NULL (pointer)`
### Signed and Unsigned Integers (of all sizes)
Large integer sizes can be disabled for build targets that do not support them.
For example, if your target only supports up to 16 bit types, by defining the
appropriate symbols Unity can be configured to omit 32 and 64 bit operations
that would break compilation (see Unity documentation for more). Refer to
Advanced Asserting later in this document for advice on dealing with other word
sizes.
##### `TEST_ASSERT_EQUAL_INT (expected, actual)`
##### `TEST_ASSERT_EQUAL_INT8 (expected, actual)`
##### `TEST_ASSERT_EQUAL_INT16 (expected, actual)`
##### `TEST_ASSERT_EQUAL_INT32 (expected, actual)`
##### `TEST_ASSERT_EQUAL_INT64 (expected, actual)`
##### `TEST_ASSERT_EQUAL (expected, actual)`
##### `TEST_ASSERT_NOT_EQUAL (expected, actual)`
##### `TEST_ASSERT_EQUAL_UINT (expected, actual)`
##### `TEST_ASSERT_EQUAL_UINT8 (expected, actual)`
##### `TEST_ASSERT_EQUAL_UINT16 (expected, actual)`
##### `TEST_ASSERT_EQUAL_UINT32 (expected, actual)`
##### `TEST_ASSERT_EQUAL_UINT64 (expected, actual)`
### Unsigned Integers (of all sizes) in Hexadecimal
All `_HEX` assertions are identical in function to unsigned integer assertions
but produce failure messages with the `expected` and `actual` values formatted
in hexadecimal. Unity output is big endian.
##### `TEST_ASSERT_EQUAL_HEX (expected, actual)`
##### `TEST_ASSERT_EQUAL_HEX8 (expected, actual)`
##### `TEST_ASSERT_EQUAL_HEX16 (expected, actual)`
##### `TEST_ASSERT_EQUAL_HEX32 (expected, actual)`
##### `TEST_ASSERT_EQUAL_HEX64 (expected, actual)`
### Masked and Bit-level Assertions
Masked and bit-level assertions produce output formatted in hexadecimal. Unity
output is big endian.
##### `TEST_ASSERT_BITS (mask, expected, actual)`
Only compares the masked (i.e. high) bits of `expected` and `actual` parameters.
##### `TEST_ASSERT_BITS_HIGH (mask, actual)`
Asserts the masked bits of the `actual` parameter are high.
##### `TEST_ASSERT_BITS_LOW (mask, actual)`
Asserts the masked bits of the `actual` parameter are low.
##### `TEST_ASSERT_BIT_HIGH (bit, actual)`
Asserts the specified bit of the `actual` parameter is high.
##### `TEST_ASSERT_BIT_LOW (bit, actual)`
Asserts the specified bit of the `actual` parameter is low.
### Integer Less Than / Greater Than
These assertions verify that the `actual` parameter is less than or greater
than `threshold` (exclusive). For example, if the threshold value is 0, the
greater than assertion will fail if the actual value is 0 or less.
##### `TEST_ASSERT_GREATER_THAN (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_INT (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_INT8 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_INT16 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_INT32 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_UINT (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_UINT8 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_UINT16 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_UINT32 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_HEX8 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_HEX16 (threshold, actual)`
##### `TEST_ASSERT_GREATER_THAN_HEX32 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_INT (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_INT8 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_INT16 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_INT32 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_UINT (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_UINT8 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_UINT16 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_UINT32 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_HEX8 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_HEX16 (threshold, actual)`
##### `TEST_ASSERT_LESS_THAN_HEX32 (threshold, actual)`
### Integer Ranges (of all sizes)
These assertions verify that the `expected` parameter is within +/- `delta`
(inclusive) of the `actual` parameter. For example, if the expected value is 10
and the delta is 3 then the assertion will fail for any value outside the range
of 7 - 13.
##### `TEST_ASSERT_INT_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_INT8_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_INT16_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_INT32_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_INT64_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_UINT_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_UINT8_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_UINT16_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_UINT32_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_UINT64_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_HEX_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_HEX8_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_HEX16_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_HEX32_WITHIN (delta, expected, actual)`
##### `TEST_ASSERT_HEX64_WITHIN (delta, expected, actual)`
### Structs and Strings
##### `TEST_ASSERT_EQUAL_PTR (expected, actual)`
Asserts that the pointers point to the same memory location.
##### `TEST_ASSERT_EQUAL_STRING (expected, actual)`
Asserts that the null terminated (`'\0'`) strings are identical. If strings are
of different lengths or any portion of the strings before their terminators
differ, the assertion fails. Two NULL strings (i.e. zero length) are considered
equivalent.
##### `TEST_ASSERT_EQUAL_MEMORY (expected, actual, len)`
Asserts that the contents of the memory specified by the `expected` and `actual`
pointers is identical. The size of the memory blocks in bytes is specified by
the `len` parameter.
### Arrays
`expected` and `actual` parameters are both arrays. `num_elements` specifies the
number of elements in the arrays to compare.
`_HEX` assertions produce failure messages with expected and actual array
contents formatted in hexadecimal.
For array of strings comparison behavior, see comments for
`TEST_ASSERT_EQUAL_STRING` in the preceding section.
Assertions fail upon the first element in the compared arrays found not to
match. Failure messages specify the array index of the failed comparison.
##### `TEST_ASSERT_EQUAL_INT_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_INT8_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_INT16_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_INT32_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_INT64_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_UINT_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_UINT8_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_UINT16_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_UINT32_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_UINT64_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_HEX_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_HEX8_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_HEX16_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_HEX32_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_HEX64_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_PTR_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_STRING_ARRAY (expected, actual, num_elements)`
##### `TEST_ASSERT_EQUAL_MEMORY_ARRAY (expected, actual, len, num_elements)`
`len` is the memory in bytes to be compared at each array element.
### Each Equal (Arrays to Single Value)
`expected` are single values and `actual` are arrays. `num_elements` specifies
the number of elements in the arrays to compare.
`_HEX` assertions produce failure messages with expected and actual array
contents formatted in hexadecimal.
Assertions fail upon the first element in the compared arrays found not to
match. Failure messages specify the array index of the failed comparison.
#### `TEST_ASSERT_EACH_EQUAL_INT (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_INT8 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_INT16 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_INT32 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_INT64 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_UINT (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_UINT8 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_UINT16 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_UINT32 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_UINT64 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_HEX (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_HEX8 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_HEX16 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_HEX32 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_HEX64 (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_PTR (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_STRING (expected, actual, num_elements)`
#### `TEST_ASSERT_EACH_EQUAL_MEMORY (expected, actual, len, num_elements)`
`len` is the memory in bytes to be compared at each array element.
### Floating Point (If enabled)
##### `TEST_ASSERT_FLOAT_WITHIN (delta, expected, actual)`
Asserts that the `actual` value is within +/- `delta` of the `expected` value.
The nature of floating point representation is such that exact evaluations of
equality are not guaranteed.
##### `TEST_ASSERT_EQUAL_FLOAT (expected, actual)`
Asserts that the `actual` value is "close enough to be considered equal" to the
`expected` value. If you are curious about the details, refer to the Advanced
Asserting section for more details on this. Omitting a user-specified delta in a
floating point assertion is both a shorthand convenience and a requirement of
code generation conventions for CMock.
##### `TEST_ASSERT_EQUAL_FLOAT_ARRAY (expected, actual, num_elements)`
See Array assertion section for details. Note that individual array element
float comparisons are executed using `TEST_ASSERT_EQUAL_FLOAT`. That is, user
specified delta comparison values requires a custom-implemented floating point
array assertion.
##### `TEST_ASSERT_FLOAT_IS_INF (actual)`
Asserts that `actual` parameter is equivalent to positive infinity floating
point representation.
##### `TEST_ASSERT_FLOAT_IS_NEG_INF (actual)`
Asserts that `actual` parameter is equivalent to negative infinity floating
point representation.
##### `TEST_ASSERT_FLOAT_IS_NAN (actual)`
Asserts that `actual` parameter is a Not A Number floating point representation.
##### `TEST_ASSERT_FLOAT_IS_DETERMINATE (actual)`
Asserts that `actual` parameter is a floating point representation usable for
mathematical operations. That is, the `actual` parameter is neither positive
infinity nor negative infinity nor Not A Number floating point representations.
##### `TEST_ASSERT_FLOAT_IS_NOT_INF (actual)`
Asserts that `actual` parameter is a value other than positive infinity floating
point representation.
##### `TEST_ASSERT_FLOAT_IS_NOT_NEG_INF (actual)`
Asserts that `actual` parameter is a value other than negative infinity floating
point representation.
##### `TEST_ASSERT_FLOAT_IS_NOT_NAN (actual)`
Asserts that `actual` parameter is a value other than Not A Number floating
point representation.
##### `TEST_ASSERT_FLOAT_IS_NOT_DETERMINATE (actual)`
Asserts that `actual` parameter is not usable for mathematical operations. That
is, the `actual` parameter is either positive infinity or negative infinity or
Not A Number floating point representations.
### Double (If enabled)
##### `TEST_ASSERT_DOUBLE_WITHIN (delta, expected, actual)`
Asserts that the `actual` value is within +/- `delta` of the `expected` value.
The nature of floating point representation is such that exact evaluations of
equality are not guaranteed.
##### `TEST_ASSERT_EQUAL_DOUBLE (expected, actual)`
Asserts that the `actual` value is "close enough to be considered equal" to the
`expected` value. If you are curious about the details, refer to the Advanced
Asserting section for more details. Omitting a user-specified delta in a
floating point assertion is both a shorthand convenience and a requirement of
code generation conventions for CMock.
##### `TEST_ASSERT_EQUAL_DOUBLE_ARRAY (expected, actual, num_elements)`
See Array assertion section for details. Note that individual array element
double comparisons are executed using `TEST_ASSERT_EQUAL_DOUBLE`. That is, user
specified delta comparison values requires a custom implemented double array
assertion.
##### `TEST_ASSERT_DOUBLE_IS_INF (actual)`
Asserts that `actual` parameter is equivalent to positive infinity floating
point representation.
##### `TEST_ASSERT_DOUBLE_IS_NEG_INF (actual)`
Asserts that `actual` parameter is equivalent to negative infinity floating point
representation.
##### `TEST_ASSERT_DOUBLE_IS_NAN (actual)`
Asserts that `actual` parameter is a Not A Number floating point representation.
##### `TEST_ASSERT_DOUBLE_IS_DETERMINATE (actual)`
Asserts that `actual` parameter is a floating point representation usable for
mathematical operations. That is, the `actual` parameter is neither positive
infinity nor negative infinity nor Not A Number floating point representations.
##### `TEST_ASSERT_DOUBLE_IS_NOT_INF (actual)`
Asserts that `actual` parameter is a value other than positive infinity floating
point representation.
##### `TEST_ASSERT_DOUBLE_IS_NOT_NEG_INF (actual)`
Asserts that `actual` parameter is a value other than negative infinity floating
point representation.
##### `TEST_ASSERT_DOUBLE_IS_NOT_NAN (actual)`
Asserts that `actual` parameter is a value other than Not A Number floating
point representation.
##### `TEST_ASSERT_DOUBLE_IS_NOT_DETERMINATE (actual)`
Asserts that `actual` parameter is not usable for mathematical operations. That
is, the `actual` parameter is either positive infinity or negative infinity or
Not A Number floating point representations.
## Advanced Asserting: Details On Tricky Assertions
This section helps you understand how to deal with some of the trickier
assertion situations you may run into. It will give you a glimpse into some of
the under-the-hood details of Unity's assertion mechanisms. If you're one of
those people who likes to know what is going on in the background, read on. If
not, feel free to ignore the rest of this document until you need it.
### How do the EQUAL assertions work for FLOAT and DOUBLE?
As you may know, directly checking for equality between a pair of floats or a
pair of doubles is sloppy at best and an outright no-no at worst. Floating point
values can often be represented in multiple ways, particularly after a series of
operations on a value. Initializing a variable to the value of 2.0 is likely to
result in a floating point representation of 2 x 2^0, but a series of
mathematical operations might result in a representation of 8 x 2^-2
that also evaluates to a value of 2. At some point repeated operations cause
equality checks to fail.
So Unity doesn't do direct floating point comparisons for equality. Instead, it
checks if two floating point values are "really close." If you leave Unity
running with defaults, "really close" means "within a significant bit or two."
Under the hood, `TEST_ASSERT_EQUAL_FLOAT` is really `TEST_ASSERT_FLOAT_WITHIN`
with the `delta` parameter calculated on the fly. For single precision, delta is
the expected value multiplied by 0.00001, producing a very small proportional
range around the expected value.
If you are expecting a value of 20,000.0 the delta is calculated to be 0.2. So
any value between 19,999.8 and 20,000.2 will satisfy the equality check. This
works out to be roughly a single bit of range for a single-precision number, and
that's just about as tight a tolerance as you can reasonably get from a floating
point value.
So what happens when it's zero? Zero - even more than other floating point
values - can be represented many different ways. It doesn't matter if you have
0 x 2^0 or 0 x 2^63. It's still zero, right? Luckily, if you
subtract these values from each other, they will always produce a difference of
zero, which will still fall between 0 plus or minus a delta of 0. So it still
works!
Double precision floating point numbers use a much smaller multiplier, again
approximating a single bit of error.
If you don't like these ranges and you want to make your floating point equality
assertions less strict, you can change these multipliers to whatever you like by
defining UNITY_FLOAT_PRECISION and UNITY_DOUBLE_PRECISION. See Unity
documentation for more.
### How do we deal with targets with non-standard int sizes?
It's "fun" that C is a standard where something as fundamental as an integer
varies by target. According to the C standard, an `int` is to be the target's
natural register size, and it should be at least 16-bits and a multiple of a
byte. It also guarantees an order of sizes:
```C
char <= short <= int <= long <= long long
```
Most often, `int` is 32-bits. In many cases in the embedded world, `int` is
16-bits. There are rare microcontrollers out there that have 24-bit integers,
and this remains perfectly standard C.
To make things even more interesting, there are compilers and targets out there
that have a hard choice to make. What if their natural register size is 10-bits
or 12-bits? Clearly they can't fulfill _both_ the requirement to be at least
16-bits AND the requirement to match the natural register size. In these
situations, they often choose the natural register size, leaving us with
something like this:
```C
char (8 bit) <= short (12 bit) <= int (12 bit) <= long (16 bit)
```
Um... yikes. It's obviously breaking a rule or two... but they had to break SOME
rules, so they made a choice.
When the C99 standard rolled around, it introduced alternate standard-size types.
It also introduced macros for pulling in MIN/MAX values for your integer types.
It's glorious! Unfortunately, many embedded compilers can't be relied upon to
use the C99 types (Sometimes because they have weird register sizes as described
above. Sometimes because they don't feel like it?).
A goal of Unity from the beginning was to support every combination of
microcontroller or microprocessor and C compiler. Over time, we've gotten really
close to this. There are a few tricks that you should be aware of, though, if
you're going to do this effectively on some of these more idiosyncratic targets.
First, when setting up Unity for a new target, you're going to want to pay
special attention to the macros for automatically detecting types
(where available) or manually configuring them yourself. You can get information
on both of these in Unity's documentation.
What about the times where you suddenly need to deal with something odd, like a
24-bit `int`? The simplest solution is to use the next size up. If you have a
24-bit `int`, configure Unity to use 32-bit integers. If you have a 12-bit
`int`, configure Unity to use 16 bits. There are two ways this is going to
affect you:
1. When Unity displays errors for you, it's going to pad the upper unused bits
with zeros.
2. You're going to have to be careful of assertions that perform signed
operations, particularly `TEST_ASSERT_INT_WITHIN`. Such assertions might wrap
your `int` in the wrong place, and you could experience false failures. You can
always back down to a simple `TEST_ASSERT` and do the operations yourself.
*Find The Latest of This And More at [ThrowTheSwitch.org](https://throwtheswitch.org)*

View File

@@ -0,0 +1,433 @@
# Unity Configuration Guide
## C Standards, Compilers and Microcontrollers
The embedded software world contains its challenges. Compilers support different
revisions of the C Standard. They ignore requirements in places, sometimes to
make the language more usable in some special regard. Sometimes it's to simplify
their support. Sometimes it's due to specific quirks of the microcontroller they
are targeting. Simulators add another dimension to this menagerie.
Unity is designed to run on almost anything that is targeted by a C compiler. It
would be awesome if this could be done with zero configuration. While there are
some targets that come close to this dream, it is sadly not universal. It is
likely that you are going to need at least a couple of the configuration options
described in this document.
All of Unity's configuration options are `#defines`. Most of these are simple
definitions. A couple are macros with arguments. They live inside the
unity_internals.h header file. We don't necessarily recommend opening that file
unless you really need to. That file is proof that a cross-platform library is
challenging to build. From a more positive perspective, it is also proof that a
great deal of complexity can be centralized primarily to one place to
provide a more consistent and simple experience elsewhere.
### Using These Options
It doesn't matter if you're using a target-specific compiler and a simulator or
a native compiler. In either case, you've got a couple choices for configuring
these options:
1. Because these options are specified via C defines, you can pass most of these
options to your compiler through command line compiler flags. Even if you're
using an embedded target that forces you to use their overbearing IDE for all
configuration, there will be a place somewhere in your project to configure
defines for your compiler.
2. You can create a custom `unity_config.h` configuration file (present in your
toolchain's search paths). In this file, you will list definitions and macros
specific to your target. All you must do is define `UNITY_INCLUDE_CONFIG_H` and
Unity will rely on `unity_config.h` for any further definitions it may need.
## The Options
### Integer Types
If you've been a C developer for long, you probably already know that C's
concept of an integer varies from target to target. The C Standard has rules
about the `int` matching the register size of the target microprocessor. It has
rules about the `int` and how its size relates to other integer types. An `int`
on one target might be 16 bits while on another target it might be 64. There are
more specific types in compilers compliant with C99 or later, but that's
certainly not every compiler you are likely to encounter. Therefore, Unity has a
number of features for helping to adjust itself to match your required integer
sizes. It starts off by trying to do it automatically.
##### `UNITY_EXCLUDE_STDINT_H`
The first thing that Unity does to guess your types is check `stdint.h`.
This file includes defines like `UINT_MAX` that Unity can use to
learn a lot about your system. It's possible you don't want it to do this
(um. why not?) or (more likely) it's possible that your system doesn't
support `stdint.h`. If that's the case, you're going to want to define this.
That way, Unity will know to skip the inclusion of this file and you won't
be left with a compiler error.
_Example:_
#define UNITY_EXCLUDE_STDINT_H
##### `UNITY_EXCLUDE_LIMITS_H`
The second attempt to guess your types is to check `limits.h`. Some compilers
that don't support `stdint.h` could include `limits.h` instead. If you don't
want Unity to check this file either, define this to make it skip the inclusion.
_Example:_
#define UNITY_EXCLUDE_LIMITS_H
If you've disabled both of the automatic options above, you're going to have to
do the configuration yourself. Don't worry. Even this isn't too bad... there are
just a handful of defines that you are going to specify if you don't like the
defaults.
##### `UNITY_INT_WIDTH`
Define this to be the number of bits an `int` takes up on your system. The
default, if not autodetected, is 32 bits.
_Example:_
#define UNITY_INT_WIDTH 16
##### `UNITY_LONG_WIDTH`
Define this to be the number of bits a `long` takes up on your system. The
default, if not autodetected, is 32 bits. This is used to figure out what kind
of 64-bit support your system can handle. Does it need to specify a `long` or a
`long long` to get a 64-bit value. On 16-bit systems, this option is going to be
ignored.
_Example:_
#define UNITY_LONG_WIDTH 16
##### `UNITY_POINTER_WIDTH`
Define this to be the number of bits a pointer takes up on your system. The
default, if not autodetected, is 32-bits. If you're getting ugly compiler
warnings about casting from pointers, this is the one to look at.
_Example:_
#define UNITY_POINTER_WIDTH 64
##### `UNITY_SUPPORT_64`
Unity will automatically include 64-bit support if it auto-detects it, or if
your `int`, `long`, or pointer widths are greater than 32-bits. Define this to
enable 64-bit support if none of the other options already did it for you. There
can be a significant size and speed impact to enabling 64-bit support on small
targets, so don't define it if you don't need it.
_Example:_
#define UNITY_SUPPORT_64
### Floating Point Types
In the embedded world, it's not uncommon for targets to have no support for
floating point operations at all or to have support that is limited to only
single precision. We are able to guess integer sizes on the fly because integers
are always available in at least one size. Floating point, on the other hand, is
sometimes not available at all. Trying to include `float.h` on these platforms
would result in an error. This leaves manual configuration as the only option.
##### `UNITY_INCLUDE_FLOAT`
##### `UNITY_EXCLUDE_FLOAT`
##### `UNITY_INCLUDE_DOUBLE`
##### `UNITY_EXCLUDE_DOUBLE`
By default, Unity guesses that you will want single precision floating point
support, but not double precision. It's easy to change either of these using the
include and exclude options here. You may include neither, either, or both, as
suits your needs. For features that are enabled, the following floating point
options also become available.
_Example:_
//what manner of strange processor is this?
#define UNITY_EXCLUDE_FLOAT
#define UNITY_INCLUDE_DOUBLE
##### `UNITY_EXCLUDE_FLOAT_PRINT`
Unity aims for as small of a footprint as possible and avoids most standard
library calls (some embedded platforms don't have a standard library!). Because
of this, its routines for printing integer values are minimalist and hand-coded.
Therefore, the display of floating point values during a failure are optional.
By default, Unity will print the actual results of floating point assertion
failure (e.g. "Expected 4.56 Was 4.68"). To not include this extra support, you
can use this define to instead respond to a failed assertion with a message like
"Values Not Within Delta". If you would like verbose failure messages for floating
point assertions, use these options to give more explicit failure messages.
_Example:_
#define UNITY_EXCLUDE_FLOAT_PRINT
##### `UNITY_FLOAT_TYPE`
If enabled, Unity assumes you want your `FLOAT` asserts to compare standard C
floats. If your compiler supports a specialty floating point type, you can
always override this behavior by using this definition.
_Example:_
#define UNITY_FLOAT_TYPE float16_t
##### `UNITY_DOUBLE_TYPE`
If enabled, Unity assumes you want your `DOUBLE` asserts to compare standard C
doubles. If you would like to change this, you can specify something else by
using this option. For example, defining `UNITY_DOUBLE_TYPE` to `long double`
could enable gargantuan floating point types on your 64-bit processor instead of
the standard `double`.
_Example:_
#define UNITY_DOUBLE_TYPE long double
##### `UNITY_FLOAT_PRECISION`
##### `UNITY_DOUBLE_PRECISION`
If you look up `UNITY_ASSERT_EQUAL_FLOAT` and `UNITY_ASSERT_EQUAL_DOUBLE` as
documented in the big daddy Unity Assertion Guide, you will learn that they are
not really asserting that two values are equal but rather that two values are
"close enough" to equal. "Close enough" is controlled by these precision
configuration options. If you are working with 32-bit floats and/or 64-bit
doubles (the normal on most processors), you should have no need to change these
options. They are both set to give you approximately 1 significant bit in either
direction. The float precision is 0.00001 while the double is 10^-12.
For further details on how this works, see the appendix of the Unity Assertion
Guide.
_Example:_
#define UNITY_FLOAT_PRECISION 0.001f
### Toolset Customization
In addition to the options listed above, there are a number of other options
which will come in handy to customize Unity's behavior for your specific
toolchain. It is possible that you may not need to touch any of these... but
certain platforms, particularly those running in simulators, may need to jump
through extra hoops to run properly. These macros will help in those
situations.
##### `UNITY_OUTPUT_CHAR(a)`
##### `UNITY_OUTPUT_FLUSH()`
##### `UNITY_OUTPUT_START()`
##### `UNITY_OUTPUT_COMPLETE()`
By default, Unity prints its results to `stdout` as it runs. This works
perfectly fine in most situations where you are using a native compiler for
testing. It works on some simulators as well so long as they have `stdout`
routed back to the command line. There are times, however, where the simulator
will lack support for dumping results or you will want to route results
elsewhere for other reasons. In these cases, you should define the
`UNITY_OUTPUT_CHAR` macro. This macro accepts a single character at a time (as
an `int`, since this is the parameter type of the standard C `putchar` function
most commonly used). You may replace this with whatever function call you like.
_Example:_
Say you are forced to run your test suite on an embedded processor with no
`stdout` option. You decide to route your test result output to a custom serial
`RS232_putc()` function you wrote like thus:
#include "RS232_header.h"
...
#define UNITY_OUTPUT_CHAR(a) RS232_putc(a)
#define UNITY_OUTPUT_START() RS232_config(115200,1,8,0)
#define UNITY_OUTPUT_FLUSH() RS232_flush()
#define UNITY_OUTPUT_COMPLETE() RS232_close()
_Note:_
`UNITY_OUTPUT_FLUSH()` can be set to the standard out flush function simply by
specifying `UNITY_USE_FLUSH_STDOUT`. No other defines are required.
##### `UNITY_WEAK_ATTRIBUTE`
##### `UNITY_WEAK_PRAGMA`
##### `UNITY_NO_WEAK`
For some targets, Unity can make the otherwise required setUp() and tearDown()
functions optional. This is a nice convenience for test writers since setUp and
tearDown don't often actually do anything. If you're using gcc or clang, this
option is automatically defined for you. Other compilers can also support this
behavior, if they support a C feature called weak functions. A weak function is
a function that is compiled into your executable unless a non-weak version of
the same function is defined elsewhere. If a non-weak version is found, the weak
version is ignored as if it never existed. If your compiler supports this feature,
you can let Unity know by defining UNITY_WEAK_ATTRIBUTE or UNITY_WEAK_PRAGMA as
the function attributes that would need to be applied to identify a function as
weak. If your compiler lacks support for weak functions, you will always need to
define setUp and tearDown functions (though they can be and often will be just
empty). You can also force Unity to NOT use weak functions by defining
UNITY_NO_WEAK. The most common options for this feature are:
_Example:_
#define UNITY_WEAK_ATTRIBUTE weak
#define UNITY_WEAK_ATTRIBUTE __attribute__((weak))
#define UNITY_WEAK_PRAGMA
#define UNITY_NO_WEAK
##### `UNITY_PTR_ATTRIBUTE`
Some compilers require a custom attribute to be assigned to pointers, like
`near` or `far`. In these cases, you can give Unity a safe default for these by
defining this option with the attribute you would like.
_Example:_
#define UNITY_PTR_ATTRIBUTE __attribute__((far))
#define UNITY_PTR_ATTRIBUTE near
##### `UNITY_PRINT_EOL`
By default, Unity outputs \n at the end of each line of output. This is easy
to parse by the scripts, by Ceedling, etc, but it might not be ideal for YOUR
system. Feel free to override this and to make it whatever you wish.
_Example:_
#define UNITY_PRINT_EOL { UNITY_OUTPUT_CHAR('\r'); UNITY_OUTPUT_CHAR('\n') }
##### `UNITY_EXCLUDE_DETAILS`
This is an option for if you absolutely must squeeze every byte of memory out of
your system. Unity stores a set of internal scratchpads which are used to pass
extra detail information around. It's used by systems like CMock in order to
report which function or argument flagged an error. If you're not using CMock and
you're not using these details for other things, then you can exclude them.
_Example:_
#define UNITY_EXCLUDE_DETAILS
##### `UNITY_EXCLUDE_SETJMP`
If your embedded system doesn't support the standard library setjmp, you can
exclude Unity's reliance on this by using this define. This dropped dependence
comes at a price, though. You will be unable to use custom helper functions for
your tests, and you will be unable to use tools like CMock. Very likely, if your
compiler doesn't support setjmp, you wouldn't have had the memory space for those
things anyway, though... so this option exists for those situations.
_Example:_
#define UNITY_EXCLUDE_SETJMP
##### `UNITY_OUTPUT_COLOR`
If you want to add color using ANSI escape codes you can use this define.
_Example:_
#define UNITY_OUTPUT_COLOR
## Getting Into The Guts
There will be cases where the options above aren't quite going to get everything
perfect. They are likely sufficient for any situation where you are compiling
and executing your tests with a native toolchain (e.g. clang on Mac). These
options may even get you through the majority of cases encountered in working
with a target simulator run from your local command line. But especially if you
must run your test suite on your target hardware, your Unity configuration will
require special help. This special help will usually reside in one of two
places: the `main()` function or the `RUN_TEST` macro. Let's look at how these
work.
##### `main()`
Each test module is compiled and run on its own, separate from the other test
files in your project. Each test file, therefore, has a `main` function. This
`main` function will need to contain whatever code is necessary to initialize
your system to a workable state. This is particularly true for situations where
you must set up a memory map or initialize a communication channel for the
output of your test results.
A simple main function looks something like this:
int main(void) {
UNITY_BEGIN();
RUN_TEST(test_TheFirst);
RUN_TEST(test_TheSecond);
RUN_TEST(test_TheThird);
return UNITY_END();
}
You can see that our main function doesn't bother taking any arguments. For our
most barebones case, we'll never have arguments because we just run all the
tests each time. Instead, we start by calling `UNITY_BEGIN`. We run each test
(in whatever order we wish). Finally, we call `UNITY_END`, returning its return
value (which is the total number of failures).
It should be easy to see that you can add code before any test cases are run or
after all the test cases have completed. This allows you to do any needed
system-wide setup or teardown that might be required for your special
circumstances.
##### `RUN_TEST`
The `RUN_TEST` macro is called with each test case function. Its job is to
perform whatever setup and teardown is necessary for executing a single test
case function. This includes catching failures, calling the test module's
`setUp()` and `tearDown()` functions, and calling `UnityConcludeTest()`. If
using CMock or test coverage, there will be additional stubs in use here. A
simple minimalist RUN_TEST macro looks something like this:
#define RUN_TEST(testfunc) \
UNITY_NEW_TEST(#testfunc) \
if (TEST_PROTECT()) { \
setUp(); \
testfunc(); \
} \
if (TEST_PROTECT() && (!TEST_IS_IGNORED)) \
tearDown(); \
UnityConcludeTest();
So that's quite a macro, huh? It gives you a glimpse of what kind of stuff Unity
has to deal with for every single test case. For each test case, we declare that
it is a new test. Then we run `setUp` and our test function. These are run
within a `TEST_PROTECT` block, the function of which is to handle failures that
occur during the test. Then, assuming our test is still running and hasn't been
ignored, we run `tearDown`. No matter what, our last step is to conclude this
test before moving on to the next.
Let's say you need to add a call to `fsync` to force all of your output data to
flush to a file after each test. You could easily insert this after your
`UnityConcludeTest` call. Maybe you want to write an xml tag before and after
each result set. Again, you could do this by adding lines to this macro. Updates
to this macro are for the occasions when you need an action before or after
every single test case throughout your entire suite of tests.
## Happy Porting
The defines and macros in this guide should help you port Unity to just about
any C target we can imagine. If you run into a snag or two, don't be afraid of
asking for help on the forums. We love a good challenge!
*Find The Latest of This And More at [ThrowTheSwitch.org](https://throwtheswitch.org)*

View File

@@ -0,0 +1,192 @@
# Unity - Getting Started
## Welcome
Congratulations. You're now the proud owner of your very own pile of bits! What
are you going to do with all these ones and zeros? This document should be able
to help you decide just that.
Unity is a unit test framework. The goal has been to keep it small and
functional. The core Unity test framework is three files: a single C file and a
couple header files. These team up to provide functions and macros to make
testing easier.
Unity was designed to be cross-platform. It works hard to stick with C standards
while still providing support for the many embedded C compilers that bend the
rules. Unity has been used with many compilers, including GCC, IAR, Clang,
Green Hills, Microchip, and MS Visual Studio. It's not much work to get it to
work with a new target.
### Overview of the Documents
#### Unity Assertions reference
This document will guide you through all the assertion options provided by
Unity. This is going to be your unit testing bread and butter. You'll spend more
time with assertions than any other part of Unity.
#### Unity Assertions Cheat Sheet
This document contains an abridged summary of the assertions described in the
previous document. It's perfect for printing and referencing while you
familiarize yourself with Unity's options.
#### Unity Configuration Guide
This document is the one to reference when you are going to use Unity with a new
target or compiler. It'll guide you through the configuration options and will
help you customize your testing experience to meet your needs.
#### Unity Helper Scripts
This document describes the helper scripts that are available for simplifying
your testing workflow. It describes the collection of optional Ruby scripts
included in the auto directory of your Unity installation. Neither Ruby nor
these scripts are necessary for using Unity. They are provided as a convenience
for those who wish to use them.
#### Unity License
What's an open source project without a license file? This brief document
describes the terms you're agreeing to when you use this software. Basically, we
want it to be useful to you in whatever context you want to use it, but please
don't blame us if you run into problems.
### Overview of the Folders
If you have obtained Unity through Github or something similar, you might be
surprised by just how much stuff you suddenly have staring you in the face.
Don't worry, Unity itself is very small. The rest of it is just there to make
your life easier. You can ignore it or use it at your convenience. Here's an
overview of everything in the project.
- `src` - This is the code you care about! This folder contains a C file and two
header files. These three files _are_ Unity.
- `docs` - You're reading this document, so it's possible you have found your way
into this folder already. This is where all the handy documentation can be
found.
- `examples` - This contains a few examples of using Unity.
- `extras` - These are optional add ons to Unity that are not part of the core
project. If you've reached us through James Grenning's book, you're going to
want to look here.
- `test` - This is how Unity and its scripts are all tested. If you're just using
Unity, you'll likely never need to go in here. If you are the lucky team member
who gets to port Unity to a new toolchain, this is a good place to verify
everything is configured properly.
- `auto` - Here you will find helpful Ruby scripts for simplifying your test
workflow. They are purely optional and are not required to make use of Unity.
## How to Create A Test File
Test files are C files. Most often you will create a single test file for each C
module that you want to test. The test file should include unity.h and the
header for your C module to be tested.
Next, a test file will include a `setUp()` and `tearDown()` function. The setUp
function can contain anything you would like to run before each test. The
tearDown function can contain anything you would like to run after each test.
Both functions accept no arguments and return nothing. You may leave either or
both of these blank if you have no need for them. If you're using a compiler
that is configured to make these functions optional, you may leave them off
completely. Not sure? Give it a try. If you compiler complains that it can't
find setUp or tearDown when it links, you'll know you need to at least include
an empty function for these.
The majority of the file will be a series of test functions. Test functions
follow the convention of starting with the word "test_" or "spec_". You don't HAVE
to name them this way, but it makes it clear what functions are tests for other
developers. Also, the automated scripts that come with Unity or Ceedling will default
to looking for test functions to be prefixed this way. Test functions take no arguments
and return nothing. All test accounting is handled internally in Unity.
Finally, at the bottom of your test file, you will write a `main()` function.
This function will call `UNITY_BEGIN()`, then `RUN_TEST` for each test, and
finally `UNITY_END()`. This is what will actually trigger each of those test
functions to run, so it is important that each function gets its own `RUN_TEST`
call.
Remembering to add each test to the main function can get to be tedious. If you
enjoy using helper scripts in your build process, you might consider making use
of our handy generate_test_runner.rb script. This will create the main function
and all the calls for you, assuming that you have followed the suggested naming
conventions. In this case, there is no need for you to include the main function
in your test file at all.
When you're done, your test file will look something like this:
```C
#include "unity.h"
#include "file_to_test.h"
void setUp(void) {
// set stuff up here
}
void tearDown(void) {
// clean stuff up here
}
void test_function_should_doBlahAndBlah(void) {
//test stuff
}
void test_function_should_doAlsoDoBlah(void) {
//more test stuff
}
int main(void) {
UNITY_BEGIN();
RUN_TEST(test_function_should_doBlahAndBlah);
RUN_TEST(test_function_should_doAlsoDoBlah);
return UNITY_END();
}
```
It's possible that you will need more customization than this, eventually.
For that sort of thing, you're going to want to look at the configuration guide.
This should be enough to get you going, though.
## How to Build and Run A Test File
This is the single biggest challenge to picking up a new unit testing framework,
at least in a language like C or C++. These languages are REALLY good at getting
you "close to the metal" (why is the phrase metal? Wouldn't it be more accurate
to say "close to the silicon"?). While this feature is usually a good thing, it
can make testing more challenging.
You have two really good options for toolchains. Depending on where you're
coming from, it might surprise you that neither of these options is running the
unit tests on your hardware.
There are many reasons for this, but here's a short version:
- On hardware, you have too many constraints (processing power, memory, etc),
- On hardware, you don't have complete control over all registers,
- On hardware, unit testing is more challenging,
- Unit testing isn't System testing. Keep them separate.
Instead of running your tests on your actual hardware, most developers choose to
develop them as native applications (using gcc or MSVC for example) or as
applications running on a simulator. Either is a good option. Native apps have
the advantages of being faster and easier to set up. Simulator apps have the
advantage of working with the same compiler as your target application. The
options for configuring these are discussed in the configuration guide.
To get either to work, you might need to make a few changes to the file
containing your register set (discussed later).
In either case, a test is built by linking unity, the test file, and the C
file(s) being tested. These files create an executable which can be run as the
test set for that module. Then, this process is repeated for the next test file.
This flexibility of separating tests into individual executables allows us to
much more thoroughly unit test our system and it keeps all the test code out of
our final release!
*Find The Latest of This And More at [ThrowTheSwitch.org](https://throwtheswitch.org)*

View File

@@ -0,0 +1,260 @@
# Unity Helper Scripts
## With a Little Help From Our Friends
Sometimes what it takes to be a really efficient C programmer is a little non-C.
The Unity project includes a couple of Ruby scripts for making your life just a tad
easier. They are completely optional. If you choose to use them, you'll need a
copy of Ruby, of course. Just install whatever the latest version is, and it is
likely to work. You can find Ruby at [ruby-lang.org](https://ruby-lang.org/).
### `generate_test_runner.rb`
Are you tired of creating your own `main` function in your test file? Do you
keep forgetting to add a `RUN_TEST` call when you add a new test case to your
suite? Do you want to use CMock or other fancy add-ons but don't want to figure
out how to create your own `RUN_TEST` macro?
Well then we have the perfect script for you!
The `generate_test_runner` script processes a given test file and automatically
creates a separate test runner file that includes `main` to execute the test
cases within the scanned test file. All you do then is add the generated runner
to your list of files to be compiled and linked, and presto you're done!
This script searches your test file for void function signatures having a
function name beginning with "test" or "spec". It treats each of these
functions as a test case and builds up a test suite of them. For example, the
following includes three test cases:
```C
void testVerifyThatUnityIsAwesomeAndWillMakeYourLifeEasier(void)
{
ASSERT_TRUE(1);
}
void test_FunctionName_should_WorkProperlyAndReturn8(void) {
ASSERT_EQUAL_INT(8, FunctionName());
}
void spec_Function_should_DoWhatItIsSupposedToDo(void) {
ASSERT_NOT_NULL(Function(5));
}
```
You can run this script a couple of ways. The first is from the command line:
```Shell
ruby generate_test_runner.rb TestFile.c NameOfRunner.c
```
Alternatively, if you include only the test file parameter, the script will copy
the name of the test file and automatically append "_Runner" to the name of the
generated file. The example immediately below will create TestFile_Runner.c.
```Shell
ruby generate_test_runner.rb TestFile.c
```
You can also add a [YAML](http://www.yaml.org/) file to configure extra options.
Conveniently, this YAML file is of the same format as that used by Unity and
CMock. So if you are using YAML files already, you can simply pass the very same
file into the generator script.
```Shell
ruby generate_test_runner.rb TestFile.c my_config.yml
```
The contents of the YAML file `my_config.yml` could look something like the
example below. If you're wondering what some of these options do, you're going
to love the next section of this document.
```YAML
:unity:
:includes:
- stdio.h
- microdefs.h
:cexception: 1
:suite_setup: "blah = malloc(1024);"
:suite_teardown: "free(blah);"
```
If you would like to force your generated test runner to include one or more
header files, you can just include those at the command line too. Just make sure
these are _after_ the YAML file, if you are using one:
```Shell
ruby generate_test_runner.rb TestFile.c my_config.yml extras.h
```
Another option, particularly if you are already using Ruby to orchestrate your
builds - or more likely the Ruby-based build tool Rake - is requiring this
script directly. Anything that you would have specified in a YAML file can be
passed to the script as part of a hash. Let's push the exact same requirement
set as we did above but this time through Ruby code directly:
```Ruby
require "generate_test_runner.rb"
options = {
:includes => ["stdio.h", "microdefs.h"],
:cexception => 1,
:suite_setup => "blah = malloc(1024);",
:suite_teardown => "free(blah);"
}
UnityTestRunnerGenerator.new.run(testfile, runner_name, options)
```
If you have multiple files to generate in a build script (such as a Rakefile),
you might want to instantiate a generator object with your options and call it
to generate each runner afterwards. Like thus:
```Ruby
gen = UnityTestRunnerGenerator.new(options)
test_files.each do |f|
gen.run(f, File.basename(f,'.c')+"Runner.c")
end
```
#### Options accepted by generate_test_runner.rb:
The following options are available when executing `generate_test_runner`. You
may pass these as a Ruby hash directly or specify them in a YAML file, both of
which are described above. In the `examples` directory, Example 3's Rakefile
demonstrates using a Ruby hash.
##### `:includes`
This option specifies an array of file names to be `#include`'d at the top of
your runner C file. You might use it to reference custom types or anything else
universally needed in your generated runners.
##### `:suite_setup`
Define this option with C code to be executed _before any_ test cases are run.
Alternatively, if your C compiler supports weak symbols, you can leave this
option unset and instead provide a `void suiteSetUp(void)` function in your test
suite. The linker will look for this symbol and fall back to a Unity-provided
stub if it is not found.
##### `:suite_teardown`
Define this option with C code to be executed _after all_ test cases have
finished. An integer variable `num_failures` is available for diagnostics.
The code should end with a `return` statement; the value returned will become
the exit code of `main`. You can normally just return `num_failures`.
Alternatively, if your C compiler supports weak symbols, you can leave this
option unset and instead provide a `int suiteTearDown(int num_failures)`
function in your test suite. The linker will look for this symbol and fall
back to a Unity-provided stub if it is not found.
##### `:enforce_strict_ordering`
This option should be defined if you have the strict order feature enabled in
CMock (see CMock documentation). This generates extra variables required for
everything to run smoothly. If you provide the same YAML to the generator as
used in CMock's configuration, you've already configured the generator properly.
##### `:mock_prefix` and `:mock_suffix`
Unity automatically generates calls to Init, Verify and Destroy for every file
included in the main test file that starts with the given mock prefix and ends
with the given mock suffix, file extension not included. By default, Unity
assumes a `Mock` prefix and no suffix.
##### `:plugins`
This option specifies an array of plugins to be used (of course, the array can
contain only a single plugin). This is your opportunity to enable support for
CException support, which will add a check for unhandled exceptions in each
test, reporting a failure if one is detected. To enable this feature using Ruby:
```Ruby
:plugins => [ :cexception ]
```
Or as a yaml file:
```YAML
:plugins:
- :cexception
```
If you are using CMock, it is very likely that you are already passing an array
of plugins to CMock. You can just use the same array here. This script will just
ignore the plugins that don't require additional support.
### `unity_test_summary.rb`
A Unity test file contains one or more test case functions. Each test case can
pass, fail, or be ignored. Each test file is run individually producing results
for its collection of test cases. A given project will almost certainly be
composed of multiple test files. Therefore, the suite of tests is comprised of
one or more test cases spread across one or more test files. This script
aggregates individual test file results to generate a summary of all executed
test cases. The output includes how many tests were run, how many were ignored,
and how many failed. In addition, the output includes a listing of which
specific tests were ignored and failed. A good example of the breadth and
details of these results can be found in the `examples` directory. Intentionally
ignored and failing tests in this project generate corresponding entries in the
summary report.
If you're interested in other (prettier?) output formats, check into the
Ceedling build tool project (ceedling.sourceforge.net) that works with Unity and
CMock and supports xunit-style xml as well as other goodies.
This script assumes the existence of files ending with the extensions
`.testpass` and `.testfail`. The contents of these files includes the test
results summary corresponding to each test file executed with the extension set
according to the presence or absence of failures for that test file. The script
searches a specified path for these files, opens each one it finds, parses the
results, and aggregates and prints a summary. Calling it from the command line
looks like this:
```Shell
ruby unity_test_summary.rb build/test/
```
You can optionally specify a root path as well. This is really helpful when you
are using relative paths in your tools' setup, but you want to pull the summary
into an IDE like Eclipse for clickable shortcuts.
```Shell
ruby unity_test_summary.rb build/test/ ~/projects/myproject/
```
Or, if you're more of a Windows sort of person:
```Shell
ruby unity_test_summary.rb build\test\ C:\projects\myproject\
```
When configured correctly, you'll see a final summary, like so:
```Shell
--------------------------
UNITY IGNORED TEST SUMMARY
--------------------------
blah.c:22:test_sandwiches_should_HaveBreadOnTwoSides:IGNORE
-------------------------
UNITY FAILED TEST SUMMARY
-------------------------
blah.c:87:test_sandwiches_should_HaveCondiments:FAIL:Expected 1 was 0
meh.c:38:test_soda_should_BeCalledPop:FAIL:Expected "pop" was "coke"
--------------------------
OVERALL UNITY TEST SUMMARY
--------------------------
45 TOTAL TESTS 2 TOTAL FAILURES 1 IGNORED
```
How convenient is that?
*Find The Latest of This And More at [ThrowTheSwitch.org](https://throwtheswitch.org)*

99
tinyusb/test/vendor/ceedling/lib/ceedling.rb vendored Executable file
View File

@@ -0,0 +1,99 @@
##
# This module defines the interface for interacting with and loading a project
# with Ceedling.
module Ceedling
  ##
  # Returns the location where the gem is installed.
  # === Return
  # _String_ - The location where the gem lives.
  def self.location
    File.join( File.dirname(__FILE__), '..')
  end

  ##
  # Return the path to the "built-in" plugins.
  # === Return
  # _String_ - The path where the default plugins live.
  def self.load_path
    File.join( self.location, 'plugins')
  end

  ##
  # Return the path to the Ceedling Rakefile
  # === Return
  # _String_
  def self.rakefile
    File.join( self.location, 'lib', 'ceedling', 'rakefile.rb' )
  end

  ##
  # This method selects the project file that Ceedling will use by setting the
  # CEEDLING_MAIN_PROJECT_FILE environment variable before loading the ceedling
  # rakefile. A path supplied as an argument to this method will override the
  # current value of the environment variable. If no path is supplied as an
  # argument then the existing value of the environment variable is used. If
  # the environment variable has not been set and no argument has been supplied
  # then a default path of './project.yml' will be used.
  #
  # === Arguments
  # +options+ _Hash_::
  #   A hash containing the options for ceedling. Currently the following
  #   options are supported:
  #   * +config+ - The path to the project YAML configuration file.
  #   * +root+ - The root of the project directory.
  #   * +prefix+ - A prefix to prepend to plugin names in order to determine the
  #     corresponding gem name.
  #   * +plugins+ - The list of ceedling plugins to load
  def self.load_project(options = {})
    # Make sure our path to the yaml file is setup.
    # Precedence: explicit :config option > pre-existing env var > default.
    if options.has_key? :config
      ENV['CEEDLING_MAIN_PROJECT_FILE'] = options[:config]
    elsif ENV['CEEDLING_MAIN_PROJECT_FILE'].nil?
      ENV['CEEDLING_MAIN_PROJECT_FILE'] = './project.yml'
    end

    # Register the plugins (must happen before the rakefile is loaded below,
    # since registration mutates DEFAULT_CEEDLING_CONFIG)
    if options.has_key? :plugins
      options[:plugins].each do |plugin|
        register_plugin( plugin, options[:prefix] )
      end
    end

    # Define the root of the project if specified
    Object.const_set('PROJECT_ROOT', options[:root]) if options.has_key? :root

    # Load ceedling — executing the rakefile kicks off the build machinery
    load "#{self.rakefile}"
  end

  ##
  # Register a plugin for ceedling to use when a project is loaded. This method
  # *must* be called prior to calling the _load_project_ method.
  #
  # This method is intended to be used for loading plugins distributed via the
  # RubyGems mechanism. As such, the following gem structure is assumed for
  # plugins.
  #
  # * The gem name must be prefixed with 'ceedling-' followed by the plugin
  #   name (ex. 'ceedling-bullseye')
  #
  # * The contents of the plugin must be installed into a subdirectory of
  #   the gem with the same name as the plugin (ex. 'bullseye/')
  #
  # === Arguments
  # +name+ _String_:: The name of the plugin to load.
  # +prefix+ _String_::
  #   (optional, default = nil) The prefix to use for the full gem name.
  def self.register_plugin(name, prefix=nil)
    # Figure out the full name of the gem and location
    prefix ||= 'ceedling-'
    gem_name = prefix + name
    gem_dir = Gem::Specification.find_by_name(gem_name).gem_dir()

    # Register the plugin with Ceedling
    require 'ceedling/defaults'
    DEFAULT_CEEDLING_CONFIG[:plugins][:enabled] << name
    DEFAULT_CEEDLING_CONFIG[:plugins][:load_paths] << gem_dir
  end
end

View File

@@ -0,0 +1,39 @@
require 'ceedling/constants'
##
# Utilities for raising and reporting errors during building.
class BuildInvokerUtils

  constructor :configurator, :streaminator

  ##
  # Processes exceptions raised during a build and tries to display a useful
  # message for the user before halting.
  #
  # ==== Attributes
  #
  # * _exception_:  The exception given by a rescue statement.
  # * _context_:    A symbol representing where in the build the exception occurs.
  # * _test_build_: A bool to signify if the exception occurred while building
  #                 from test or source.
  #
  def process_exception(exception, context, test_build=true)
    # Only Rake's "missing task" failures get special handling; anything else
    # propagates unchanged.
    missing_task = /Don't know how to build task '(.+)'/i.match(exception.message)
    raise exception if missing_task.nil?

    error_header  = "ERROR: Rake could not find file referenced in source"
    error_header += " or test" if test_build
    error_header += ": '#{missing_task[1]}'. Possible stale dependency."
    @streaminator.stderr_puts( error_header )

    if @configurator.project_use_deep_dependencies
      @streaminator.stderr_puts( "Try fixing #include statements or adding missing file. Then run '#{REFRESH_TASK_ROOT}#{context.to_s}' task and try again." )
    end

    # Diagnostics already printed above; raise an empty error to stop the build.
    raise ''
  end

end

View File

@@ -0,0 +1,47 @@
class Cacheinator

  constructor :cacheinator_helper, :file_path_utils, :file_wrapper, :yaml_wrapper

  # Persist the test-build input configuration into the test build cache.
  def cache_test_config(hash)
    @yaml_wrapper.dump( @file_path_utils.form_test_build_cache_path( INPUT_CONFIGURATION_CACHE_FILE ), hash )
  end

  # Persist the release-build input configuration into the release build cache.
  def cache_release_config(hash)
    @yaml_wrapper.dump( @file_path_utils.form_release_build_cache_path( INPUT_CONFIGURATION_CACHE_FILE ), hash )
  end

  # Compare +filepath+ against its cached copy. When the cached copy is absent
  # or its contents differ, refresh the cache and return +filepath+ (signals
  # "changed"); otherwise return the cached path (signals "unchanged").
  def diff_cached_test_file( filepath )
    cached_filepath = @file_path_utils.form_test_build_cache_path( filepath )

    up_to_date = @file_wrapper.exist?( cached_filepath ) && @file_wrapper.compare( filepath, cached_filepath )
    return cached_filepath if up_to_date

    @file_wrapper.cp( filepath, cached_filepath, {:preserve => false} )
    filepath
  end

  # True if the test-build configuration differs from its cached copy.
  def diff_cached_test_config?(hash)
    @cacheinator_helper.diff_cached_config?( @file_path_utils.form_test_build_cache_path(INPUT_CONFIGURATION_CACHE_FILE), hash )
  end

  # True if the compile defines for +files+ differ from the cached record.
  def diff_cached_test_defines?(files)
    @cacheinator_helper.diff_cached_defines?( @file_path_utils.form_test_build_cache_path(DEFINES_DEPENDENCY_CACHE_FILE), files )
  end

  # True if the release-build configuration differs from its cached copy.
  def diff_cached_release_config?(hash)
    @cacheinator_helper.diff_cached_config?( @file_path_utils.form_release_build_cache_path(INPUT_CONFIGURATION_CACHE_FILE), hash )
  end

end

View File

@@ -0,0 +1,31 @@
class CacheinatorHelper

  constructor :file_wrapper, :yaml_wrapper

  # True when no cache file exists yet, or when the cached YAML no longer
  # equals +hash+.
  def diff_cached_config?(cached_filepath, hash)
    return true unless @file_wrapper.exist?(cached_filepath)
    @yaml_wrapper.load(cached_filepath) != hash
  end

  # True when the compile defines recorded for +files+ differ from the current
  # defines; the cache file is (re)written as a side effect.
  def diff_cached_defines?(cached_filepath, files)
    defines = COLLECTION_DEFINES_TEST_AND_VENDOR.reject(&:empty?)
    current_dependency = Hash[files.collect { |source| [source, defines.dup] }]

    # First run: seed the cache and report "no change".
    unless @file_wrapper.exist?(cached_filepath)
      @yaml_wrapper.dump(cached_filepath, current_dependency)
      return false
    end

    cached = @yaml_wrapper.load(cached_filepath)
    return false if cached.values_at(*current_dependency.keys) == current_dependency.values

    # Defines changed: fold the fresh entries into the cache and report it.
    cached.merge!(current_dependency)
    @yaml_wrapper.dump(cached_filepath, cached)
    true
  end

end

View File

@@ -0,0 +1,15 @@
require 'cmock'
# Thin holder that constructs and exposes the CMock instance for the build.
class CmockBuilder

  # The CMock instance; nil until manufacture has been called.
  attr_accessor :cmock

  # Reset state for a fresh run.
  def setup
    @cmock = nil
  end

  # Build the CMock instance from the given configuration hash.
  def manufacture(cmock_config)
    @cmock = CMock.new(cmock_config)
  end

end

View File

@@ -0,0 +1,363 @@
require 'ceedling/defaults'
require 'ceedling/constants'
require 'ceedling/file_path_utils'
require 'deep_merge'
# Central clearinghouse for Ceedling's project configuration. Ingests the raw
# project YAML, merges tool/plugin/vendor defaults, expands paths & environment
# variables, and finally flattens the hash into global constants and accessor
# methods referenced throughout the build.
class Configurator

  attr_reader :project_config_hash, :script_plugins, :rake_plugins
  attr_accessor :project_logging, :project_debug, :project_verbosity, :sanity_checks

  # `constructor` is a DI macro (diy/constructor gem): generates initialize()
  # accepting the listed collaborators; the block runs after injection.
  constructor(:configurator_setup, :configurator_builder, :configurator_plugins, :cmock_builder, :yaml_wrapper, :system_wrapper) do
    @project_logging = false
    @project_debug = false
    @project_verbosity = Verbosity::NORMAL
    @sanity_checks = TestResultsSanityChecks::NORMAL
  end

  # Initialize mutable state (invoked by the DI container after construction).
  def setup
    # special copy of cmock config to provide to cmock for construction
    @cmock_config_hash = {}

    # note: project_config_hash is an instance variable so constants and accessors created
    # in eval() statements in build() have something of proper scope and persistence to reference
    @project_config_hash = {}
    @project_config_hash_backup = {}

    @script_plugins = []
    @rake_plugins = []
  end

  # Merge an already-flattened config into the working hash and regenerate
  # constants/accessors against this object's binding.
  def replace_flattened_config(config)
    @project_config_hash.merge!(config)
    @configurator_setup.build_constants_and_accessors(@project_config_hash, binding())
  end

  # Snapshot the working config so restore_config() can roll back later.
  def store_config
    @project_config_hash_backup = @project_config_hash.clone
  end

  # Roll back to the last snapshot and regenerate constants/accessors.
  def restore_config
    @project_config_hash = @project_config_hash_backup
    @configurator_setup.build_constants_and_accessors(@project_config_hash, binding())
  end

  # Drop any user-supplied overrides for the built-in tools so defaults can
  # be re-populated cleanly.
  def reset_defaults(config)
    [:test_compiler,
     :test_linker,
     :test_fixture,
     :test_includes_preprocessor,
     :test_file_preprocessor,
     :test_dependencies_generator,
     :release_compiler,
     :release_assembler,
     :release_linker,
     :release_dependencies_generator].each do |tool|
      config[:tools].delete(tool) if (not (config[:tools][tool].nil?))
    end
  end

  # The default values defined in defaults.rb (eg. DEFAULT_TOOLS_TEST) are populated
  # into @param config
  def populate_defaults(config)
    new_config = DEFAULT_CEEDLING_CONFIG.deep_clone
    new_config.deep_merge!(config)
    config.replace(new_config)

    # conditionally layer in tool defaults based on which build features are enabled
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_TEST )
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_TEST_PREPROCESSORS ) if (config[:project][:use_test_preprocessor])
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_TEST_DEPENDENCIES ) if (config[:project][:use_deep_dependencies])
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_RELEASE ) if (config[:project][:release_build])
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_RELEASE_ASSEMBLER ) if (config[:project][:release_build] and config[:release_build][:use_assembly])
    @configurator_builder.populate_defaults( config, DEFAULT_TOOLS_RELEASE_DEPENDENCIES ) if (config[:project][:release_build] and config[:project][:use_deep_dependencies])
  end

  # Fold the :unity section into the accumulating runner configuration.
  def populate_unity_defaults(config)
    unity = config[:unity] || {}
    @runner_config = unity.merge(@runner_config || config[:test_runner] || {})
  end

  def populate_cmock_defaults(config)
    # cmock has its own internal defaults handling, but we need to set these specific values
    # so they're present for the build environment to access;
    # note: these need to end up in the hash given to initialize cmock for this to be successful
    cmock = config[:cmock] || {}

    # yes, we're duplicating the default mock_prefix in cmock, but it's because we need CMOCK_MOCK_PREFIX always available in Ceedling's environment
    cmock[:mock_prefix] = 'Mock' if (cmock[:mock_prefix].nil?)

    # just because strict ordering is the way to go
    cmock[:enforce_strict_ordering] = true if (cmock[:enforce_strict_ordering].nil?)

    cmock[:mock_path] = File.join(config[:project][:build_root], TESTS_BASE_PATH, 'mocks') if (cmock[:mock_path].nil?)
    cmock[:verbosity] = @project_verbosity if (cmock[:verbosity].nil?)

    cmock[:plugins] = [] if (cmock[:plugins].nil?)
    cmock[:plugins].map! { |plugin| plugin.to_sym }
    cmock[:plugins] << (:cexception) if (!cmock[:plugins].include?(:cexception) and (config[:project][:use_exceptions]))
    cmock[:plugins].uniq!

    cmock[:unity_helper] = false if (cmock[:unity_helper].nil?)

    if (cmock[:unity_helper])
      # normalize a single helper path into a one-element list, then make the
      # helper headers visible to cmock as includes
      cmock[:unity_helper] = [cmock[:unity_helper]] if cmock[:unity_helper].is_a? String
      cmock[:includes] += cmock[:unity_helper].map{|helper| File.basename(helper) }
      cmock[:includes].uniq!
    end

    @runner_config = cmock.merge(@runner_config || config[:test_runner] || {})

    @cmock_builder.manufacture(cmock)
  end

  # Accessor for the runner configuration accumulated by the populate_* methods.
  def get_runner_config
    @runner_config
  end

  # grab tool names from yaml and insert into tool structures so available for error messages
  # set up default values
  def tools_setup(config)
    config[:tools].each_key do |name|
      tool = config[:tools][name]

      # populate name if not given
      tool[:name] = name.to_s if (tool[:name].nil?)

      # handle inline ruby string substitution in executable
      if (tool[:executable] =~ RUBY_STRING_REPLACEMENT_PATTERN)
        tool[:executable].replace(@system_wrapper.module_eval(tool[:executable]))
      end

      # populate stderr redirect option
      tool[:stderr_redirect] = StdErrRedirect::NONE if (tool[:stderr_redirect].nil?)

      # populate background execution option
      tool[:background_exec] = BackgroundExec::NONE if (tool[:background_exec].nil?)

      # populate optional option to control verification of executable in search paths
      tool[:optional] = false if (tool[:optional].nil?)
    end
  end

  # Append any top-level :tools_<name> argument lists onto the flattened tool entries.
  def tools_supplement_arguments(config)
    tools_name_prefix = 'tools_'
    config[:tools].each_key do |name|
      tool = @project_config_hash[(tools_name_prefix + name.to_s).to_sym]

      # smoosh in extra arguments if specified at top-level of config (useful for plugins & default gcc tools)
      # arguments are squirted in at _end_ of list
      top_level_tool = (tools_name_prefix + name.to_s).to_sym
      if (not config[top_level_tool].nil?)
        # adding and flattening is not a good idea: might over-flatten if there's array nesting in tool args
        tool[:arguments].concat config[top_level_tool][:arguments]
      end
    end
  end

  # Locate enabled plugins on disk, merge their configuration YAML into the
  # project config, and record rake/script plugin lists for later loading.
  def find_and_merge_plugins(config)
    # plugins must be loaded before generic path evaluation & magic that happen later;
    # perform path magic here as discrete step
    config[:plugins][:load_paths].each do |path|
      path.replace(@system_wrapper.module_eval(path)) if (path =~ RUBY_STRING_REPLACEMENT_PATTERN)
      FilePathUtils::standardize(path)
    end

    config[:plugins][:load_paths] << FilePathUtils::standardize(Ceedling.load_path)
    config[:plugins][:load_paths].uniq!

    paths_hash = @configurator_plugins.add_load_paths(config)

    @rake_plugins = @configurator_plugins.find_rake_plugins(config, paths_hash)
    @script_plugins = @configurator_plugins.find_script_plugins(config, paths_hash)
    config_plugins = @configurator_plugins.find_config_plugins(config, paths_hash)
    plugin_defaults = @configurator_plugins.find_plugin_defaults(config, paths_hash)

    config_plugins.each do |plugin|
      plugin_config = @yaml_wrapper.load(plugin)
      config.deep_merge(plugin_config)
    end

    plugin_defaults.each do |defaults|
      @configurator_builder.populate_defaults( config, @yaml_wrapper.load(defaults) )
    end

    # special plugin setting for results printing
    config[:plugins][:display_raw_test_results] = true if (config[:plugins][:display_raw_test_results].nil?)

    paths_hash.each_pair { |name, path| config[:plugins][name] = path }
  end

  # Recursively deep-merge any YAML files listed under :import, consuming the
  # list as it goes; removes the :import key afterward.
  def merge_imports(config)
    if config[:import]
      until config[:import].empty?
        path = config[:import].shift
        path = @system_wrapper.module_eval(path) if (path =~ RUBY_STRING_REPLACEMENT_PATTERN)
        config.deep_merge!(@yaml_wrapper.load(path))
      end
    end
    config.delete(:import)
  end

  # Evaluate inline-ruby substitutions in :environment entries and export each
  # entry into the process environment (keys uppercased; :path joined with the
  # platform path separator).
  def eval_environment_variables(config)
    config[:environment].each do |hash|
      key = hash.keys[0]
      value = hash[key]
      items = []

      interstitial = ((key == :path) ? File::PATH_SEPARATOR : '')
      items = ((value.class == Array) ? hash[key] : [value])

      items.each { |item| item.replace( @system_wrapper.module_eval( item ) ) if (item =~ RUBY_STRING_REPLACEMENT_PATTERN) }
      hash[key] = items.join( interstitial )

      @system_wrapper.env_set( key.to_s.upcase, hash[key] )
    end
  end

  # Evaluate inline-ruby substitutions in all path-bearing config entries.
  def eval_paths(config)
    # [:plugins]:[load_paths] already handled

    paths = [ # individual paths that don't follow convention processed below
      config[:project][:build_root],
      config[:release_build][:artifacts]]

    eval_path_list( paths )

    config[:paths].each_pair { |collection, paths| eval_path_list( paths ) }

    config[:files].each_pair { |collection, files| eval_path_list( files ) }

    # all other paths at secondary hash key level processed by convention:
    # ex. [:toplevel][:foo_path] & [:toplevel][:bar_paths] are evaluated
    config.each_pair { |parent, child| eval_path_list( collect_path_list( child ) ) }
  end

  # Normalize path separators/format in-place across all path-bearing entries.
  def standardize_paths(config)
    # [:plugins]:[load_paths] already handled

    paths = [ # individual paths that don't follow convention processed below
      config[:project][:build_root],
      config[:release_build][:artifacts]] # cmock path in case it was explicitly set in config

    paths.flatten.each { |path| FilePathUtils::standardize( path ) }

    config[:paths].each_pair do |collection, paths|
      # ensure that list is an array (i.e. handle case of list being a single string,
      # or a multidimensional array)
      config[:paths][collection] = [paths].flatten.map{|path| FilePathUtils::standardize( path )}
    end

    config[:files].each_pair { |collection, files| files.each{ |path| FilePathUtils::standardize( path ) } }

    # note: the block parameter `config` here shadows the method argument on purpose —
    # it is the per-tool sub-hash, not the whole configuration
    config[:tools].each_pair { |tool, config| FilePathUtils::standardize( config[:executable] ) if (config.include? :executable) }

    # all other paths at secondary hash key level processed by convention:
    # ex. [:toplevel][:foo_path] & [:toplevel][:bar_paths] are standardized
    config.each_pair do |parent, child|
      collect_path_list( child ).each { |path| FilePathUtils::standardize( path ) }
    end
  end

  # Run all validators; hard-fails (raise) on missing required sections, then
  # collects all remaining validation results before failing on any falsehood.
  def validate(config)
    # collect felonies and go straight to jail
    raise if (not @configurator_setup.validate_required_sections( config ))

    # collect all misdemeanors, everybody on probation
    blotter = []
    blotter << @configurator_setup.validate_required_section_values( config )
    blotter << @configurator_setup.validate_paths( config )
    blotter << @configurator_setup.validate_tools( config )
    blotter << @configurator_setup.validate_plugins( config )

    raise if (blotter.include?( false ))
  end

  # create constants and accessors (attached to this object) from given hash
  def build(config, *keys)
    # create flattened & expanded configuration hash
    built_config = @configurator_setup.build_project_config( config, @configurator_builder.flattenify( config ) )

    @project_config_hash = built_config.clone
    store_config()

    @configurator_setup.build_constants_and_accessors(built_config, binding())

    # top-level keys disappear when we flatten, so create global constants & accessors to any specified keys
    keys.each do |key|
      hash = { key => config[key] }
      @configurator_setup.build_constants_and_accessors(hash, binding())
    end
  end

  # add to constants and accessors as post build step
  def build_supplement(config_base, config_more)
    # merge in our post-build additions to base configuration hash
    config_base.deep_merge!( config_more )

    # flatten our addition hash
    config_more_flattened = @configurator_builder.flattenify( config_more )

    # merge our flattened hash with built hash from previous build
    @project_config_hash.deep_merge!( config_more_flattened )
    store_config()

    # create more constants and accessors
    @configurator_setup.build_constants_and_accessors(config_more_flattened, binding())

    # recreate constants & update accessors with new merged, base values
    config_more.keys.each do |key|
      hash = { key => config_base[key] }
      @configurator_setup.build_constants_and_accessors(hash, binding())
    end
  end

  # Append plugin rakefiles to the list of rakefile components to be loaded.
  def insert_rake_plugins(plugins)
    plugins.each do |plugin|
      @project_config_hash[:project_rakefile_component_files] << plugin
    end
  end

  ### private ###

  private

  # Gather the values of any keys ending in _path/_paths from a hash; returns
  # a flattened array (empty for non-hash containers).
  def collect_path_list( container )
    paths = []
    container.each_key { |key| paths << container[key] if (key.to_s =~ /_path(s)?$/) } if (container.class == Hash)
    return paths.flatten
  end

  # Evaluate inline-ruby substitutions across a (possibly nested) path list;
  # mutates the path strings in place via String#replace.
  def eval_path_list( paths )
    if paths.kind_of?(Array)
      # defensive shallow copy — Array.new(array) duplicates the top-level array
      paths = Array.new(paths)
    end

    paths.flatten.each do |path|
      path.replace( @system_wrapper.module_eval( path ) ) if (path =~ RUBY_STRING_REPLACEMENT_PATTERN)
    end
  end

end

View File

@@ -0,0 +1,458 @@
require 'rubygems'
require 'rake' # for ext() method
require 'ceedling/file_path_utils' # for class methods
require 'ceedling/defaults'
require 'ceedling/constants' # for Verbosity constants class & base file paths
# Worker object behind Configurator: flattens the project configuration,
# derives build paths, collects file lists from the filesystem, and installs
# global constants / accessor methods for the flattened keys.
class ConfiguratorBuilder

  # DI macro: generates initialize() accepting the listed collaborators.
  constructor :file_system_utils, :file_wrapper, :system_wrapper

  # Promote every key/value of the flattened config to a global Ruby constant
  # (UPPERCASED key), replacing any pre-existing constant of the same name.
  def build_global_constants(config)
    config.each_pair do |key, value|
      formatted_key = key.to_s.upcase

      # undefine global constant if it already exists
      Object.send(:remove_const, formatted_key.to_sym) if @system_wrapper.constants_include?(formatted_key)

      # create global constant
      # (string module_eval picks up the local `value` from this frame's binding)
      Object.module_eval("#{formatted_key} = value")
    end
  end

  # Define a reader method per flattened config key on the given binding's
  # object (the Configurator), backed by @project_config_hash.
  def build_accessor_methods(config, context)
    config.each_pair do |key, value|
      # fill configurator object with accessor methods
      eval("def #{key.to_s.downcase}() return @project_config_hash[:#{key.to_s}] end", context)
    end
  end

  # create a flattened hash from the original configuration structure
  def flattenify(config)
    new_hash = {}

    config.each_key do | parent |
      # gracefully handle empty top-level entries
      next if (config[parent].nil?)

      case config[parent]
      when Array
        # arrays of single-key hashes become parent_child keys
        config[parent].each do |hash|
          key = "#{parent.to_s.downcase}_#{hash.keys[0].to_s.downcase}".to_sym
          new_hash[key] = hash[hash.keys[0]]
        end
      when Hash
        config[parent].each_pair do | child, value |
          key = "#{parent.to_s.downcase}_#{child.to_s.downcase}".to_sym
          new_hash[key] = value
        end
      # handle entries with no children, only values
      else
        new_hash["#{parent.to_s.downcase}".to_sym] = config[parent]
      end
    end

    return new_hash
  end

  # Fill in any config entries missing relative to the given defaults hash;
  # sorted traversal gives deterministic order for test mocks.
  def populate_defaults(config, defaults)
    defaults.keys.sort.each do |section|
      defaults[section].keys.sort.each do |entry|
        config[section] = {} if config[section].nil?
        config[section][entry] = defaults[section][entry].deep_clone if (config[section][entry].nil?)
      end
    end
  end

  def clean(in_hash)
    # ensure that include files inserted into test runners have file extensions & proper ones at that
    in_hash[:test_runner_includes].map!{|include| include.ext(in_hash[:extension_header])}
  end

  # Derive every build-output directory from the project build root. Returns a
  # hash of :project_*_path entries plus :project_build_paths, the list of
  # directories that must exist for the build (conditional on build features).
  def set_build_paths(in_hash)
    out_hash = {}

    project_build_artifacts_root = File.join(in_hash[:project_build_root], 'artifacts')
    project_build_tests_root = File.join(in_hash[:project_build_root], TESTS_BASE_PATH)
    project_build_release_root = File.join(in_hash[:project_build_root], RELEASE_BASE_PATH)

    # [ key, path, create-this-directory? ]
    paths = [
      [:project_build_artifacts_root, project_build_artifacts_root, true ],
      [:project_build_tests_root, project_build_tests_root, true ],
      [:project_build_release_root, project_build_release_root, in_hash[:project_release_build] ],

      [:project_test_artifacts_path, File.join(project_build_artifacts_root, TESTS_BASE_PATH), true ],
      [:project_test_runners_path, File.join(project_build_tests_root, 'runners'), true ],
      [:project_test_results_path, File.join(project_build_tests_root, 'results'), true ],
      [:project_test_build_output_path, File.join(project_build_tests_root, 'out'), true ],
      [:project_test_build_output_asm_path, File.join(project_build_tests_root, 'out', 'asm'), true ],
      [:project_test_build_output_c_path, File.join(project_build_tests_root, 'out', 'c'), true ],
      [:project_test_build_cache_path, File.join(project_build_tests_root, 'cache'), true ],
      [:project_test_dependencies_path, File.join(project_build_tests_root, 'dependencies'), true ],

      [:project_release_artifacts_path, File.join(project_build_artifacts_root, RELEASE_BASE_PATH), in_hash[:project_release_build] ],
      [:project_release_build_cache_path, File.join(project_build_release_root, 'cache'), in_hash[:project_release_build] ],
      [:project_release_build_output_path, File.join(project_build_release_root, 'out'), in_hash[:project_release_build] ],
      [:project_release_build_output_asm_path, File.join(project_build_release_root, 'out', 'asm'), in_hash[:project_release_build] ],
      [:project_release_build_output_c_path, File.join(project_build_release_root, 'out', 'c'), in_hash[:project_release_build] ],
      [:project_release_dependencies_path, File.join(project_build_release_root, 'dependencies'), in_hash[:project_release_build] ],

      [:project_log_path, File.join(in_hash[:project_build_root], 'logs'), true ],
      [:project_temp_path, File.join(in_hash[:project_build_root], 'temp'), true ],

      [:project_test_preprocess_includes_path, File.join(project_build_tests_root, 'preprocess/includes'), in_hash[:project_use_test_preprocessor] ],
      [:project_test_preprocess_files_path, File.join(project_build_tests_root, 'preprocess/files'), in_hash[:project_use_test_preprocessor] ],
    ]

    out_hash[:project_build_paths] = []

    # fetch already set mock path
    out_hash[:project_build_paths] << in_hash[:cmock_mock_path] if (in_hash[:project_use_mocks])

    paths.each do |path|
      build_path_name = path[0]
      build_path = path[1]
      build_path_add_condition = path[2]

      # insert path into build paths if associated with true condition
      out_hash[:project_build_paths] << build_path if build_path_add_condition
      # set path symbol name and path for each entry in paths array
      out_hash[build_path_name] = build_path
    end

    return out_hash
  end

  # Locations of the sentinel files whose timestamps force full rebuilds.
  def set_force_build_filepaths(in_hash)
    out_hash = {}
    out_hash[:project_test_force_rebuild_filepath] = File.join( in_hash[:project_test_dependencies_path], 'force_build' )
    out_hash[:project_release_force_rebuild_filepath] = File.join( in_hash[:project_release_dependencies_path], 'force_build' ) if (in_hash[:project_release_build])
    return out_hash
  end

  # Assemble the list of .rake files composing the generated rakefile, adding
  # optional components based on enabled build features.
  def set_rakefile_components(in_hash)
    out_hash = {
      :project_rakefile_component_files =>
        [File.join(CEEDLING_LIB, 'ceedling', 'tasks_base.rake'),
         File.join(CEEDLING_LIB, 'ceedling', 'tasks_filesystem.rake'),
         File.join(CEEDLING_LIB, 'ceedling', 'tasks_tests.rake'),
         File.join(CEEDLING_LIB, 'ceedling', 'tasks_vendor.rake'),
         File.join(CEEDLING_LIB, 'ceedling', 'rules_tests.rake')]}

    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'rules_cmock.rake') if (in_hash[:project_use_mocks])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'rules_preprocess.rake') if (in_hash[:project_use_test_preprocessor])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'rules_tests_deep_dependencies.rake') if (in_hash[:project_use_deep_dependencies])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'tasks_tests_deep_dependencies.rake') if (in_hash[:project_use_deep_dependencies])

    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'rules_release_deep_dependencies.rake') if (in_hash[:project_release_build] and in_hash[:project_use_deep_dependencies])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'rules_release.rake') if (in_hash[:project_release_build])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'tasks_release_deep_dependencies.rake') if (in_hash[:project_release_build] and in_hash[:project_use_deep_dependencies])
    out_hash[:project_rakefile_component_files] << File.join(CEEDLING_LIB, 'ceedling', 'tasks_release.rake') if (in_hash[:project_release_build])

    return out_hash
  end

  # Derive the release binary and map-file target paths (empty hash when no
  # release build is configured).
  def set_release_target(in_hash)
    return {} if (not in_hash[:project_release_build])

    release_target_file = ((in_hash[:release_build_output].nil?) ? (DEFAULT_RELEASE_TARGET_NAME.ext(in_hash[:extension_executable])) : in_hash[:release_build_output])
    release_map_file = ((in_hash[:release_build_output].nil?) ? (DEFAULT_RELEASE_TARGET_NAME.ext(in_hash[:extension_map])) : in_hash[:release_build_output].ext(in_hash[:extension_map]))

    return {
      # tempted to make a helper method in file_path_utils? stop right there, pal. you'll introduce a cyclical dependency
      :project_release_build_target => File.join(in_hash[:project_build_release_root], release_target_file),
      :project_release_build_map => File.join(in_hash[:project_build_release_root], release_map_file)
    }
  end

  # Gather all *.yml project-option files found in the configured option paths.
  def collect_project_options(in_hash)
    options = []

    in_hash[:project_options_paths].each do |path|
      options << @file_wrapper.directory_listing( File.join(path, '*.yml') )
    end

    return {
      :collection_project_options => options.flatten
    }
  end

  # For every flattened key beginning with 'paths', expand globs into concrete
  # directory lists stored under a matching :collection_paths* key.
  def expand_all_path_globs(in_hash)
    out_hash = {}
    path_keys = []

    in_hash.each_key do |key|
      # only keys whose name starts with 'paths'
      next if (not key.to_s[0..4] == 'paths')
      path_keys << key
    end

    # sorted to provide assured order of traversal in test calls on mocks
    path_keys.sort.each do |key|
      out_hash["collection_#{key.to_s}".to_sym] = @file_system_utils.collect_paths( in_hash[key] )
    end

    return out_hash
  end

  # Source + include paths, filtered down to directories that actually exist.
  def collect_source_and_include_paths(in_hash)
    return {
      :collection_paths_source_and_include =>
        ( in_hash[:collection_paths_source] +
          in_hash[:collection_paths_include] ).select {|x| File.directory?(x)}
      }
  end

  # Source + include paths plus CException vendor path when exceptions enabled.
  def collect_source_include_vendor_paths(in_hash)
    extra_paths = []
    extra_paths << File.join(in_hash[:cexception_vendor_path], CEXCEPTION_LIB_PATH) if (in_hash[:project_use_exceptions])

    return {
      :collection_paths_source_include_vendor =>
        in_hash[:collection_paths_source_and_include] +
        extra_paths
      }
  end

  # Test + support + source + include paths, existing directories only.
  def collect_test_support_source_include_paths(in_hash)
    return {
      :collection_paths_test_support_source_include =>
        (in_hash[:collection_paths_test] +
        in_hash[:collection_paths_support] +
        in_hash[:collection_paths_source] +
        in_hash[:collection_paths_include] ).select {|x| File.directory?(x)}
      }
  end

  # Vendor library paths (unity, plus cexception/cmock when those features are on).
  def collect_vendor_paths(in_hash)
    return {:collection_paths_vendor => get_vendor_paths(in_hash)}
  end

  def collect_test_support_source_include_vendor_paths(in_hash)
    return {
      :collection_paths_test_support_source_include_vendor =>
        in_hash[:collection_paths_test_support_source_include] +
        get_vendor_paths(in_hash)
      }
  end

  # Find all test source files (by prefix & extension) and apply :files:test overrides.
  def collect_tests(in_hash)
    all_tests = @file_wrapper.instantiate_file_list

    in_hash[:collection_paths_test].each do |path|
      all_tests.include( File.join(path, "#{in_hash[:project_test_file_prefix]}*#{in_hash[:extension_source]}") )
    end

    @file_system_utils.revise_file_list( all_tests, in_hash[:files_test] )

    return {:collection_all_tests => all_tests}
  end

  # Find assembly files in source & support paths (only when an assembly build
  # is enabled for release or test), plus explicit :files:assembly entries.
  def collect_assembly(in_hash)
    all_assembly = @file_wrapper.instantiate_file_list

    return {:collection_all_assembly => all_assembly} if ((not in_hash[:release_build_use_assembly]) && (not in_hash[:test_build_use_assembly]))

    # Sprinkle in all assembly files we can find in the source folders
    in_hash[:collection_paths_source].each do |path|
      all_assembly.include( File.join(path, "*#{in_hash[:extension_assembly]}") )
    end

    # Also add all assembly files we can find in the support folders
    in_hash[:collection_paths_support].each do |path|
      all_assembly.include( File.join(path, "*#{in_hash[:extension_assembly]}") )
    end

    # Also add files that we are explicitly adding via :files:assembly: section
    @file_system_utils.revise_file_list( all_assembly, in_hash[:files_assembly] )

    return {:collection_all_assembly => all_assembly}
  end

  # Find all C source files; a "path" entry that is itself a file is added directly.
  def collect_source(in_hash)
    all_source = @file_wrapper.instantiate_file_list
    in_hash[:collection_paths_source].each do |path|
      # NOTE(review): File.exists? is deprecated in modern Ruby in favor of File.exist?
      if File.exists?(path) and not File.directory?(path)
        all_source.include( path )
      else
        all_source.include( File.join(path, "*#{in_hash[:extension_source]}") )
      end
    end
    @file_system_utils.revise_file_list( all_source, in_hash[:files_source] )

    return {:collection_all_source => all_source}
  end

  # Find all header files across test/support/source/include paths.
  def collect_headers(in_hash)
    all_headers = @file_wrapper.instantiate_file_list

    paths =
      in_hash[:collection_paths_test] +
      in_hash[:collection_paths_support] +
      in_hash[:collection_paths_source] +
      in_hash[:collection_paths_include]

    paths.each do |path|
      all_headers.include( File.join(path, "*#{in_hash[:extension_header]}") )
    end

    @file_system_utils.revise_file_list( all_headers, in_hash[:files_include] )

    return {:collection_all_headers => all_headers}
  end

  # All existing headers & sources that feed release compilation.
  def collect_release_existing_compilation_input(in_hash)
    release_input = @file_wrapper.instantiate_file_list

    paths =
      in_hash[:collection_paths_source] +
      in_hash[:collection_paths_include]

    paths << File.join(in_hash[:cexception_vendor_path], CEXCEPTION_LIB_PATH) if (in_hash[:project_use_exceptions])

    paths.each do |path|
      release_input.include( File.join(path, "*#{in_hash[:extension_header]}") )
      # NOTE(review): File.exists? is deprecated in modern Ruby in favor of File.exist?
      if File.exists?(path) and not File.directory?(path)
        release_input.include( path )
      else
        release_input.include( File.join(path, "*#{in_hash[:extension_source]}") )
      end
    end

    @file_system_utils.revise_file_list( release_input, in_hash[:files_source] )
    @file_system_utils.revise_file_list( release_input, in_hash[:files_include] )

    # finding assembly files handled explicitly through other means

    return {:collection_release_existing_compilation_input => release_input}
  end

  # All existing headers, sources (and optionally assembly) that feed any build,
  # including unity/cexception/cmock vendor sources as enabled.
  def collect_all_existing_compilation_input(in_hash)
    all_input = @file_wrapper.instantiate_file_list

    paths =
      in_hash[:collection_paths_test] +
      in_hash[:collection_paths_support] +
      in_hash[:collection_paths_source] +
      in_hash[:collection_paths_include] +
      [File.join(in_hash[:unity_vendor_path], UNITY_LIB_PATH)]

    paths << File.join(in_hash[:cexception_vendor_path], CEXCEPTION_LIB_PATH) if (in_hash[:project_use_exceptions])
    paths << File.join(in_hash[:cmock_vendor_path], CMOCK_LIB_PATH) if (in_hash[:project_use_mocks])

    paths.each do |path|
      all_input.include( File.join(path, "*#{in_hash[:extension_header]}") )
      # NOTE(review): File.exists? is deprecated in modern Ruby in favor of File.exist?
      if File.exists?(path) and not File.directory?(path)
        all_input.include( path )
      else
        all_input.include( File.join(path, "*#{in_hash[:extension_source]}") )
        all_input.include( File.join(path, "*#{in_hash[:extension_assembly]}") ) if (defined?(TEST_BUILD_USE_ASSEMBLY) && TEST_BUILD_USE_ASSEMBLY)
      end
    end

    @file_system_utils.revise_file_list( all_input, in_hash[:files_test] )
    @file_system_utils.revise_file_list( all_input, in_hash[:files_support] )
    @file_system_utils.revise_file_list( all_input, in_hash[:files_source] )
    @file_system_utils.revise_file_list( all_input, in_hash[:files_include] )

    # finding assembly files handled explicitly through other means

    return {:collection_all_existing_compilation_input => all_input}
  end

  # Test-build compile defines: project test defines plus unity/cmock/cexception defines.
  def collect_test_and_vendor_defines(in_hash)
    test_defines = in_hash[:defines_test].clone

    test_defines.concat(in_hash[:unity_defines])
    test_defines.concat(in_hash[:cmock_defines]) if (in_hash[:project_use_mocks])
    test_defines.concat(in_hash[:cexception_defines]) if (in_hash[:project_use_exceptions])

    return {:collection_defines_test_and_vendor => test_defines}
  end

  # Release-build compile defines: project release defines plus cexception defines.
  def collect_release_and_vendor_defines(in_hash)
    release_defines = in_hash[:defines_release].clone

    release_defines.concat(in_hash[:cexception_defines]) if (in_hash[:project_use_exceptions])

    return {:collection_defines_release_and_vendor => release_defines}
  end

  # Extra object files linked into the release artifact (CException when enabled).
  def collect_release_artifact_extra_link_objects(in_hash)
    objects = []

    # no build paths here so plugins can remap if necessary (i.e. path mapping happens at runtime)
    objects << CEXCEPTION_C_FILE.ext( in_hash[:extension_object] ) if (in_hash[:project_use_exceptions])

    return {:collection_release_artifact_extra_link_objects => objects}
  end

  # Extra object files linked into every test fixture (unity, support files,
  # optional cexception/cmock and unity-helper sources).
  def collect_test_fixture_extra_link_objects(in_hash)
    # Note: Symbols passed to compiler at command line can change Unity and CException behavior / configuration;
    # we also handle those dependencies elsewhere in compilation dependencies

    objects = [UNITY_C_FILE]

    in_hash[:files_support].each { |file| objects << File.basename(file) }

    # we don't include paths here because use of plugins or mixing different compilers may require different build paths
    objects << CEXCEPTION_C_FILE if (in_hash[:project_use_exceptions])
    objects << CMOCK_C_FILE if (in_hash[:project_use_mocks])

    # if we're using mocks & a unity helper is defined & that unity helper includes a source file component (not only a header of macros),
    # then link in the unity_helper object file too
    if ( in_hash[:project_use_mocks] and in_hash[:cmock_unity_helper] )
      in_hash[:cmock_unity_helper].each do |helper|
        if @file_wrapper.exist?(helper.ext(in_hash[:extension_source]))
          objects << File.basename(helper)
        end
      end
    end

    # no build paths here so plugins can remap if necessary (i.e. path mapping happens at runtime)
    objects.map! { |object| object.ext(in_hash[:extension_object]) }

    return { :collection_test_fixture_extra_link_objects => objects }
  end

  private

  # Vendor library source directories for enabled features (unity always;
  # cexception/cmock and the generated mock path when those features are on).
  def get_vendor_paths(in_hash)
    vendor_paths = []
    vendor_paths << File.join(in_hash[:unity_vendor_path], UNITY_LIB_PATH)
    vendor_paths << File.join(in_hash[:cexception_vendor_path], CEXCEPTION_LIB_PATH) if (in_hash[:project_use_exceptions])
    vendor_paths << File.join(in_hash[:cmock_vendor_path], CMOCK_LIB_PATH) if (in_hash[:project_use_mocks])
    vendor_paths << in_hash[:cmock_mock_path] if (in_hash[:project_use_mocks])

    return vendor_paths
  end

end

View File

@@ -0,0 +1,111 @@
require 'ceedling/constants'
# Discovers enabled plugins on disk and classifies them by delivery mechanism:
# rake tasks (<name>.rake), ruby scripts (lib/<name>.rb), configuration
# (config/<name>.yml), and default values (config/defaults.yml).
class ConfiguratorPlugins

  # DI macro: generates initialize() accepting the listed collaborators.
  constructor :stream_wrapper, :file_wrapper, :system_wrapper
  attr_reader :rake_plugins, :script_plugins

  def setup
    @rake_plugins = []
    @script_plugins = []
  end

  # For each enabled plugin, search the configured load paths for its directory;
  # script plugin lib/ dirs are pushed onto the ruby load path. Returns a hash
  # of :<plugin>_path => directory entries for every plugin found.
  def add_load_paths(config)
    located = {}

    config[:plugins][:enabled].each do |plugin|
      config[:plugins][:load_paths].each do |root|
        candidate = File.join(root, plugin)

        # listing order (lib/*.rb before *.rake) preserved for wrapper call expectations
        script_plugin = !@file_wrapper.directory_listing( File.join( candidate, 'lib', '*.rb' ) ).empty?
        rake_plugin = !@file_wrapper.directory_listing( File.join( candidate, '*.rake' ) ).empty?

        next unless script_plugin || rake_plugin

        located[(plugin + '_path').to_sym] = candidate
        @system_wrapper.add_load_path( File.join( candidate, 'lib' ) ) if script_plugin
        break # first matching load path wins
      end
    end

    return located
  end

  # gather up and return .rake filepaths that exist on-disk
  def find_rake_plugins(config, plugin_paths)
    @rake_plugins = []
    found = []

    config[:plugins][:enabled].each do |plugin|
      path = plugin_paths[(plugin + '_path').to_sym]
      next if path.nil?

      rake_filepath = File.join(path, "#{plugin}.rake")
      next unless @file_wrapper.exist?(rake_filepath)

      found << rake_filepath
      @rake_plugins << plugin
    end

    return found
  end

  # gather up and return just names of .rb classes that exist on-disk
  def find_script_plugins(config, plugin_paths)
    @script_plugins = []

    config[:plugins][:enabled].each do |plugin|
      path = plugin_paths[(plugin + '_path').to_sym]
      next if path.nil?

      script_filepath = File.join(path, "lib", "#{plugin}.rb")
      @script_plugins << plugin if @file_wrapper.exist?(script_filepath)
    end

    return @script_plugins
  end

  # gather up and return configuration .yml filepaths that exist on-disk
  def find_config_plugins(config, plugin_paths)
    found = []

    config[:plugins][:enabled].each do |plugin|
      path = plugin_paths[(plugin + '_path').to_sym]
      next if path.nil?

      config_filepath = File.join(path, "config", "#{plugin}.yml")
      found << config_filepath if @file_wrapper.exist?(config_filepath)
    end

    return found
  end

  # gather up and return default .yml filepaths that exist on-disk
  def find_plugin_defaults(config, plugin_paths)
    found = []

    config[:plugins][:enabled].each do |plugin|
      path = plugin_paths[(plugin + '_path').to_sym]
      next if path.nil?

      defaults_filepath = File.join(path, 'config', 'defaults.yml')
      found << defaults_filepath if @file_wrapper.exist?(defaults_filepath)
    end

    return found
  end

end

View File

@@ -0,0 +1,127 @@
# add sort-ability to symbol so we can order keys array in hash for test-ability
class Symbol
  include Comparable

  # Compare symbols alphabetically via their string representations.
  def <=>(other)
    to_s <=> other.to_s
  end
end
# Performs post-load assembly and validation of the project configuration:
# drives the configurator_builder to enrich the flattened config hash, then
# runs a battery of validations (required sections, paths, tools, plugins).
class ConfiguratorSetup

  constructor :configurator_builder, :configurator_validator, :configurator_plugins, :stream_wrapper

  # Flesh out the flattened configuration with values derived from the
  # config contents and the file system; returns the enriched hash.
  def build_project_config(config, flattened_config)
    ### flesh out config
    @configurator_builder.clean(flattened_config)

    # Each builder step returns a hash fragment folded into the flattened
    # configuration. Order is significant: later steps consume values
    # produced by earlier ones (e.g. glob expansion precedes collection).
    builder_steps = [
      :set_build_paths,
      :set_force_build_filepaths,
      :set_rakefile_components,
      :set_release_target,
      :collect_project_options,
      # iterate through all entries in paths section and expand any & all globs to actual paths
      :expand_all_path_globs,
      :collect_vendor_paths,
      :collect_source_and_include_paths,
      :collect_source_include_vendor_paths,
      :collect_test_support_source_include_paths,
      :collect_test_support_source_include_vendor_paths,
      :collect_tests,
      :collect_assembly,
      :collect_source,
      :collect_headers,
      :collect_release_existing_compilation_input,
      :collect_all_existing_compilation_input,
      :collect_test_and_vendor_defines,
      :collect_release_and_vendor_defines,
      :collect_release_artifact_extra_link_objects,
      :collect_test_fixture_extra_link_objects,
    ]

    builder_steps.each do |step|
      flattened_config.merge!(@configurator_builder.send(step, flattened_config))
    end

    flattened_config
  end

  # Generate global constants & accessor methods from the finalized config.
  def build_constants_and_accessors(config, context)
    @configurator_builder.build_global_constants(config)
    @configurator_builder.build_accessor_methods(config, context)
  end

  # Verify the top-level sections every project file must contain.
  # All checks run (no short-circuit) so every problem is reported at once.
  def validate_required_sections(config)
    [:project, :paths].map { |section| @configurator_validator.exists?(config, section) }.all?
  end

  # Verify the minimum required entries inside those sections.
  def validate_required_section_values(config)
    required = [[:project, :build_root], [:paths, :test], [:paths, :source]]
    required.map { |keys| @configurator_validator.exists?(config, *keys) }.all?
  end

  # Verify that all configured paths/filepaths exist on disk.
  def validate_paths(config)
    results = []

    if config[:cmock][:unity_helper]
      config[:cmock][:unity_helper].each do |path|
        results << @configurator_validator.validate_filepath_simple( path, :cmock, :unity_helper )
      end
    end

    config[:project][:options_paths].each do |path|
      results << @configurator_validator.validate_filepath_simple( path, :project, :options_paths )
    end

    config[:plugins][:load_paths].each do |path|
      results << @configurator_validator.validate_filepath_simple( path, :plugins, :load_paths )
    end

    config[:paths].keys.sort.each do |key|
      results << @configurator_validator.validate_path_list(config, :paths, key)
    end

    results.all?
  end

  # Verify each configured tool: executable entry present, resolvable on
  # disk/PATH (unless marked :optional), and stderr redirect option valid.
  def validate_tools(config)
    results = []

    config[:tools].keys.sort.each do |key|
      results << @configurator_validator.exists?(config, :tools, key, :executable)
      results << @configurator_validator.validate_executable_filepath(config, :tools, key, :executable) if (not config[:tools][key][:optional])
      results << @configurator_validator.validate_tool_stderr_redirect(config, :tools, key)
    end

    results.all?
  end

  # Every enabled plugin must have been discovered as either a rake plugin
  # or a script plugin; report any that were not found.
  def validate_plugins(config)
    discovered = Set.new( @configurator_plugins.rake_plugins ) | Set.new( @configurator_plugins.script_plugins )
    missing_plugins = Set.new( config[:plugins][:enabled] ) - discovered

    missing_plugins.each do |plugin|
      @stream_wrapper.stderr_puts("ERROR: Ceedling plugin '#{plugin}' contains no rake or ruby class entry point. (Misspelled or missing files?)")
    end

    missing_plugins.empty?
  end

end

View File

@@ -0,0 +1,193 @@
require 'rubygems'
require 'rake' # for ext()
require 'ceedling/constants'
require 'ceedling/tool_executor' # for argument replacement pattern
require 'ceedling/file_path_utils' # for glob handling class methods
# Validates entries of the project configuration hash: key existence,
# path/filepath existence on disk, executable resolution against system
# search paths, and tool stderr-redirect option sanity.
class ConfiguratorValidator

  constructor :file_wrapper, :stream_wrapper, :system_wrapper

  # walk into config hash verify existence of data at key depth
  # Returns true when a non-nil value lives at config[keys[0]][keys[1]]...;
  # otherwise prints an error to stderr and returns false.
  def exists?(config, *keys)
    hash = retrieve_value(config, keys)
    exist = !hash[:value].nil?

    if (not exist)
      # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
      @stream_wrapper.stderr_puts("ERROR: Required config file entry #{format_key_sequence(keys, hash[:depth])} does not exist.")
    end

    return exist
  end

  # walk into config hash. verify directory path(s) at given key depth
  # Accepts either a single String path or an Array of paths; each entry may
  # carry add/subtract notation and glob specifiers, which are stripped
  # before the existence check.
  def validate_path_list(config, *keys)
    hash = retrieve_value(config, keys)
    list = hash[:value]

    # return early if we couldn't walk into hash and find a value
    return false if (list.nil?)

    path_list = []
    exist = true

    # normalize scalar vs. array input into a flat list of paths
    case list
      when String then path_list << list
      when Array then path_list = list
    end

    path_list.each do |path|
      base_path = FilePathUtils::extract_path(path) # lop off add/subtract notation & glob specifiers

      if (not @file_wrapper.exist?(base_path))
        # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
        @stream_wrapper.stderr_puts("ERROR: Config path #{format_key_sequence(keys, hash[:depth])}['#{base_path}'] does not exist on disk.")
        exist = false
      end
    end

    return exist
  end

  # simple path verification
  # Here +keys+ is only used to label the error message; +path+ itself is
  # checked directly (no hash walking).
  def validate_filepath_simple(path, *keys)
    validate_path = path

    if (not @file_wrapper.exist?(validate_path))
      # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
      @stream_wrapper.stderr_puts("ERROR: Config path '#{validate_path}' associated with #{format_key_sequence(keys, keys.size)} does not exist on disk.")
      return false
    end

    return true
  end

  # walk into config hash. verify specified file exists.
  # Paths containing tool-executor argument replacement patterns (${n}) are
  # accepted without checking since they are resolved later at runtime.
  def validate_filepath(config, *keys)
    hash = retrieve_value(config, keys)
    filepath = hash[:value]

    # return early if we couldn't walk into hash and find a value
    return false if (filepath.nil?)

    # skip everything if we've got an argument replacement pattern
    return true if (filepath =~ TOOL_EXECUTOR_ARGUMENT_REPLACEMENT_PATTERN)

    if (not @file_wrapper.exist?(filepath))

      # See if we can deal with it internally.
      # NOTE(review): GENERATED_DIR_PATH is built with `.each` in constants.rb,
      # which returns the un-joined source array -- verify this membership test
      # actually matches joined paths as the warning message implies.
      if GENERATED_DIR_PATH.include?(filepath)
        # we already made this directory before let's make it again.
        FileUtils.mkdir_p File.join(File.dirname(__FILE__), filepath)
        @stream_wrapper.stderr_puts("WARNING: Generated filepath #{format_key_sequence(keys, hash[:depth])}['#{filepath}'] does not exist on disk. Recreating")

      else
        # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
        @stream_wrapper.stderr_puts("ERROR: Config filepath #{format_key_sequence(keys, hash[:depth])}['#{filepath}'] does not exist on disk.")
        return false
      end
    end

    return true
  end

  # walk into config hash. verify specified file exists.
  # For bare executable names (no '/'), searches the system search paths,
  # also trying the configured executable extension and, on Windows, .exe.
  def validate_executable_filepath(config, *keys)
    exe_extension = config[:extension][:executable]
    hash = retrieve_value(config, keys)
    filepath = hash[:value]

    # return early if we couldn't walk into hash and find a value
    return false if (filepath.nil?)

    # skip everything if we've got an argument replacement pattern
    return true if (filepath =~ TOOL_EXECUTOR_ARGUMENT_REPLACEMENT_PATTERN)

    # if there's no path included, verify file exists somewhere in system search paths
    if (not filepath.include?('/'))
      exists = false

      @system_wrapper.search_paths.each do |path|
        if (@file_wrapper.exist?( File.join(path, filepath)) )
          exists = true
          break
        end

        # also try the configured executable extension, then the Windows .exe fallback
        if (@file_wrapper.exist?( (File.join(path, filepath)).ext( exe_extension ) ))
          exists = true
          break
        elsif (@system_wrapper.windows? and @file_wrapper.exist?( (File.join(path, filepath)).ext( EXTENSION_WIN_EXE ) ))
          exists = true
          break
        end
      end

      if (not exists)
        # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
        @stream_wrapper.stderr_puts("ERROR: Config filepath #{format_key_sequence(keys, hash[:depth])}['#{filepath}'] does not exist in system search paths.")
        return false
      end

    # if there is a path included, check that explicit filepath exists
    else
      if (not @file_wrapper.exist?(filepath))
        # no verbosity checking since this is lowest level anyhow & verbosity checking depends on configurator
        @stream_wrapper.stderr_puts("ERROR: Config filepath #{format_key_sequence(keys, hash[:depth])}['#{filepath}'] does not exist on disk.")
        return false
      end
    end

    return true
  end

  # Verify a tool's :stderr_redirect, when given as a Symbol, is one of the
  # options declared on StdErrRedirect (String values pass unchecked).
  def validate_tool_stderr_redirect(config, tools, tool)
    redirect = config[tools][tool][:stderr_redirect]
    if (redirect.class == Symbol)
      # map constants and force to array of strings for runtime universality across ruby versions
      if (not StdErrRedirect.constants.map{|constant| constant.to_s}.include?(redirect.to_s.upcase))
        error = "ERROR: [:#{tools}][:#{tool}][:stderr_redirect][:#{redirect}] is not a recognized option " +
                "{#{StdErrRedirect.constants.map{|constant| ':' + constant.to_s.downcase}.join(', ')}}."
        @stream_wrapper.stderr_puts(error)
        return false
      end
    end
    return true
  end

  private #########################################

  # Walk +keys+ into +config+; returns {:value => found value or nil,
  # :depth => number of keys traversed before stopping}.
  def retrieve_value(config, keys)
    value = nil
    hash = config
    depth = 0

    # walk into hash & extract value at requested key sequence
    keys.each do |symbol|
      depth += 1
      if (not hash[symbol].nil?)
        hash = hash[symbol]
        value = hash
      else
        value = nil
        break
      end
    end

    return {:value => value, :depth => depth}
  end

  # Render the traversed portion of a key sequence as "[:a][:b]..." for
  # human-readable error messages.
  def format_key_sequence(keys, depth)
    walked_keys = keys.slice(0, depth)
    formatted_keys = walked_keys.map{|key| "[:#{key.to_s}]"}

    return formatted_keys.join
  end

end

View File

@@ -0,0 +1,97 @@
# Namespaced integer constants for console output verbosity levels;
# higher values include everything the lower levels print.
class Verbosity
  SILENT = 0 # as silent as possible (though there are some messages that must be spit out)
  ERRORS = 1 # only errors
  COMPLAIN = 2 # spit out errors and warnings/notices
  NORMAL = 3 # errors, warnings/notices, standard status messages
  OBNOXIOUS = 4 # all messages including extra verbose output (used for lite debugging / verification)
  DEBUG = 5 # special extra verbose output for hardcore debugging
end
# Levels of sanity checking applied to collected test results.
class TestResultsSanityChecks
  NONE = 0 # no sanity checking of test results
  NORMAL = 1 # perform non-problematic checks
  THOROUGH = 2 # perform checks that require inside knowledge of system workings
end
# Symbolic options for how a tool's stderr stream is redirected;
# validated against these constant names in ConfiguratorValidator.
class StdErrRedirect
  NONE = :none # no redirection
  AUTO = :auto # pick a shell-appropriate redirect automatically
  WIN = :win   # Windows cmd-style redirect
  UNIX = :unix # Bourne-style shell redirect
  TCSH = :tcsh # tcsh/csh-style redirect
end
# Symbolic options for launching a tool as a background process.
class BackgroundExec
  NONE = :none # run in the foreground
  AUTO = :auto # pick a shell-appropriate backgrounding mechanism
  WIN = :win   # Windows-style backgrounding
  UNIX = :unix # Unix-style backgrounding (&)
end
# Root of the project being built; overridable by defining before load.
unless defined?(PROJECT_ROOT)
  PROJECT_ROOT = Dir.pwd()
end

# Relative directory paths Ceedling itself generates/manages.
# BUGFIX: originally built with `.each`, which returns the receiver unchanged
# and left the nested arrays un-joined; `.map` produces the intended joined
# path strings (e.g. 'vendor/ceedling', 'test/support') so membership tests
# such as GENERATED_DIR_PATH.include?(filepath) behave as designed.
GENERATED_DIR_PATH = [['vendor', 'ceedling'], 'src', "test", ['test', 'support'], 'build'].map{|p| File.join(*p)}

# Platform executable extensions.
EXTENSION_WIN_EXE = '.exe'
EXTENSION_NONWIN_EXE = '.out'

# Vendored CException layout.
CEXCEPTION_ROOT_PATH = 'c_exception'
CEXCEPTION_LIB_PATH = "#{CEXCEPTION_ROOT_PATH}/lib"
CEXCEPTION_C_FILE = 'CException.c'
CEXCEPTION_H_FILE = 'CException.h'

# Vendored Unity layout.
UNITY_ROOT_PATH = 'unity'
UNITY_LIB_PATH = "#{UNITY_ROOT_PATH}/src"
UNITY_C_FILE = 'unity.c'
UNITY_H_FILE = 'unity.h'
UNITY_INTERNALS_H_FILE = 'unity_internals.h'

# Vendored CMock layout.
CMOCK_ROOT_PATH = 'cmock'
CMOCK_LIB_PATH = "#{CMOCK_ROOT_PATH}/src"
CMOCK_C_FILE = 'cmock.c'
CMOCK_H_FILE = 'cmock.h'

# Project/user configuration filenames and cache files.
DEFAULT_CEEDLING_MAIN_PROJECT_FILE = 'project.yml' unless defined?(DEFAULT_CEEDLING_MAIN_PROJECT_FILE) # main project file
DEFAULT_CEEDLING_USER_PROJECT_FILE = 'user.yml' unless defined?(DEFAULT_CEEDLING_USER_PROJECT_FILE) # supplemental user config file
INPUT_CONFIGURATION_CACHE_FILE = 'input.yml' unless defined?(INPUT_CONFIGURATION_CACHE_FILE) # input configuration file dump
DEFINES_DEPENDENCY_CACHE_FILE = 'defines_dependency.yml' unless defined?(DEFINES_DEPENDENCY_CACHE_FILE) # preprocessor definitions for files

# Rake task namespace roots and their symbol forms.
TEST_ROOT_NAME = 'test' unless defined?(TEST_ROOT_NAME)
TEST_TASK_ROOT = TEST_ROOT_NAME + ':' unless defined?(TEST_TASK_ROOT)
TEST_SYM = TEST_ROOT_NAME.to_sym unless defined?(TEST_SYM)

RELEASE_ROOT_NAME = 'release' unless defined?(RELEASE_ROOT_NAME)
RELEASE_TASK_ROOT = RELEASE_ROOT_NAME + ':' unless defined?(RELEASE_TASK_ROOT)
RELEASE_SYM = RELEASE_ROOT_NAME.to_sym unless defined?(RELEASE_SYM)

REFRESH_ROOT_NAME = 'refresh' unless defined?(REFRESH_ROOT_NAME)
REFRESH_TASK_ROOT = REFRESH_ROOT_NAME + ':' unless defined?(REFRESH_TASK_ROOT)
REFRESH_SYM = REFRESH_ROOT_NAME.to_sym unless defined?(REFRESH_SYM)

UTILS_ROOT_NAME = 'utils' unless defined?(UTILS_ROOT_NAME)
UTILS_TASK_ROOT = UTILS_ROOT_NAME + ':' unless defined?(UTILS_TASK_ROOT)
UTILS_SYM = UTILS_ROOT_NAME.to_sym unless defined?(UTILS_SYM)

# Build operation identifiers.
OPERATION_COMPILE_SYM = :compile unless defined?(OPERATION_COMPILE_SYM)
OPERATION_ASSEMBLE_SYM = :assemble unless defined?(OPERATION_ASSEMBLE_SYM)
OPERATION_LINK_SYM = :link unless defined?(OPERATION_LINK_SYM)

# Patterns used when evaluating configuration values and tool arguments.
RUBY_STRING_REPLACEMENT_PATTERN = /#\{.+\}/
RUBY_EVAL_REPLACEMENT_PATTERN = /^\{(.+)\}$/
TOOL_EXECUTOR_ARGUMENT_REPLACEMENT_PATTERN = /(\$\{(\d+)\})/
TEST_STDOUT_STATISTICS_PATTERN = /\n-+\s*(\d+)\s+Tests\s+(\d+)\s+Failures\s+(\d+)\s+Ignored\s+(OK|FAIL)\s*/i

# NOTE(review): Unix null device; presumably normalized elsewhere for Windows -- verify.
NULL_FILE_PATH = '/dev/null'

TESTS_BASE_PATH = TEST_ROOT_NAME
RELEASE_BASE_PATH = RELEASE_ROOT_NAME

View File

@@ -0,0 +1,418 @@
require 'ceedling/constants'
require 'ceedling/system_wrapper'
require 'ceedling/file_path_utils'
#this should be defined already, but not always during system specs
# CEEDLING_VENDOR: absolute path to Ceedling's vendored dependencies;
# CEEDLING_PLUGINS: default (empty) list of plugin load paths.
CEEDLING_VENDOR = File.expand_path(File.dirname(__FILE__) + '/../../vendor') unless defined? CEEDLING_VENDOR
CEEDLING_PLUGINS = [] unless defined? CEEDLING_PLUGINS
# Default gcc invocation for compiling test-build objects.
# ${n} tokens are argument-replacement slots filled by the tool executor;
# {"flag" => 'COLLECTION'} pairs expand the flag once per collection entry.
DEFAULT_TEST_COMPILER_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_test_compiler'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_TEST_AND_VENDOR'}.freeze,
    "-DGNU_COMPILER".freeze,
    "-g".freeze,
    "-c \"${1}\"".freeze,
    "-o \"${2}\"".freeze,
    # gcc's list file output options are complex; no use of ${3} parameter in default config
    "-MMD".freeze,
    "-MF \"${4}\"".freeze,
  ].freeze
}

# Default gcc invocation for linking test executables.
DEFAULT_TEST_LINKER_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_test_linker'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    "\"${1}\"".freeze,
    "-o \"${2}\"".freeze,
    "".freeze,
    "${4}".freeze
  ].freeze
}

# Default test fixture: the built test executable itself (${1}).
DEFAULT_TEST_FIXTURE_TOOL = {
  :executable => '${1}'.freeze,
  :name => 'default_test_fixture'.freeze,
  :stderr_redirect => StdErrRedirect::AUTO.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [].freeze
}

# Default preprocessor invocation used to discover #include dependencies
# of a test file (-MM -MG make-rule output).
DEFAULT_TEST_INCLUDES_PREPROCESSOR_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_test_includes_preprocessor'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    '-E'.freeze, # OSX clang
    '-MM'.freeze,
    '-MG'.freeze,
    # avoid some possibility of deep system lib header file complications by omitting vendor paths
    # if cpp is run on *nix system, escape spaces in paths; if cpp on windows just use the paths collection as is
    # {"-I\"$\"" => "{SystemWrapper.windows? ? COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE : COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE.map{|path| path.gsub(\/ \/, \'\\\\ \') }}"}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_TEST_AND_VENDOR'}.freeze,
    {"-D$" => 'DEFINES_TEST_PREPROCESS'}.freeze,
    "-DGNU_COMPILER".freeze, # OSX clang
    '-w'.freeze,
    # '-nostdinc'.freeze, # disabled temporarily due to stdio access violations on OSX
    "\"${1}\"".freeze
  ].freeze
}

# Default preprocessor invocation that fully expands a test file to an
# output file (${2}) ahead of mock generation / compilation.
DEFAULT_TEST_FILE_PREPROCESSOR_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_test_file_preprocessor'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    '-E'.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_TEST_AND_VENDOR'}.freeze,
    {"-D$" => 'DEFINES_TEST_PREPROCESS'}.freeze,
    "-DGNU_COMPILER".freeze,
    # '-nostdinc'.freeze, # disabled temporarily due to stdio access violations on OSX
    "\"${1}\"".freeze,
    "-o \"${2}\"".freeze
  ].freeze
}
# Disable the -MD flag for OSX LLVM Clang, since unsupported
# NOTE(review): probes `gcc` at load time via backticks; `2> /dev/null`
# assumes a POSIX shell -- verify behavior on Windows hosts.
if RUBY_PLATFORM =~ /darwin/ && `gcc --version 2> /dev/null` =~ /Apple LLVM version .* \(clang/m # OSX w/LLVM Clang
  MD_FLAG = '' # Clang doesn't support the -MD flag
else
  MD_FLAG = '-MD'
end

# Default gcc invocation generating make-rule dependency info (${2}) for a
# test-build source file (${1}), with rule target ${3}.
DEFAULT_TEST_DEPENDENCIES_GENERATOR_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_test_dependencies_generator'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    '-E'.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_TEST_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_TEST_AND_VENDOR'}.freeze,
    {"-D$" => 'DEFINES_TEST_PREPROCESS'}.freeze,
    "-DGNU_COMPILER".freeze,
    "-MT \"${3}\"".freeze,
    '-MM'.freeze,
    MD_FLAG.freeze,
    '-MG'.freeze,
    "-MF \"${2}\"".freeze,
    "-c \"${1}\"".freeze,
    # '-nostdinc'.freeze,
  ].freeze
}

# Release-build counterpart of the dependency generator above.
DEFAULT_RELEASE_DEPENDENCIES_GENERATOR_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_release_dependencies_generator'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    '-E'.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_RELEASE_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_RELEASE_AND_VENDOR'}.freeze,
    {"-D$" => 'DEFINES_RELEASE_PREPROCESS'}.freeze,
    "-DGNU_COMPILER".freeze,
    "-MT \"${3}\"".freeze,
    '-MM'.freeze,
    MD_FLAG.freeze,
    '-MG'.freeze,
    "-MF \"${2}\"".freeze,
    "-c \"${1}\"".freeze,
    # '-nostdinc'.freeze,
  ].freeze
}

# Default gcc invocation for compiling release-build objects.
DEFAULT_RELEASE_COMPILER_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_release_compiler'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    {"-I\"$\"" => 'COLLECTION_PATHS_SOURCE_INCLUDE_VENDOR'}.freeze,
    {"-I\"$\"" => 'COLLECTION_PATHS_RELEASE_TOOLCHAIN_INCLUDE'}.freeze,
    {"-D$" => 'COLLECTION_DEFINES_RELEASE_AND_VENDOR'}.freeze,
    "-DGNU_COMPILER".freeze,
    "-c \"${1}\"".freeze,
    "-o \"${2}\"".freeze,
    # gcc's list file output options are complex; no use of ${3} parameter in default config
    "-MMD".freeze,
    "-MF \"${4}\"".freeze,
  ].freeze
}

# Default `as` invocation for assembling release-build sources.
DEFAULT_RELEASE_ASSEMBLER_TOOL = {
  :executable => FilePathUtils.os_executable_ext('as').freeze,
  :name => 'default_release_assembler'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    {"-I\"$\"" => 'COLLECTION_PATHS_SOURCE_AND_INCLUDE'}.freeze,
    "\"${1}\"".freeze,
    "-o \"${2}\"".freeze,
  ].freeze
}

# Default gcc invocation for linking the release artifact.
DEFAULT_RELEASE_LINKER_TOOL = {
  :executable => FilePathUtils.os_executable_ext('gcc').freeze,
  :name => 'default_release_linker'.freeze,
  :stderr_redirect => StdErrRedirect::NONE.freeze,
  :background_exec => BackgroundExec::NONE.freeze,
  :optional => false.freeze,
  :arguments => [
    "\"${1}\"".freeze,
    "-o \"${2}\"".freeze,
    "".freeze,
    "${4}".freeze
  ].freeze
}
# Tool-set fragments merged into the project configuration depending on
# which features (preprocessing, deep dependencies, release build) are enabled.
DEFAULT_TOOLS_TEST = {
  :tools => {
    :test_compiler => DEFAULT_TEST_COMPILER_TOOL,
    :test_linker => DEFAULT_TEST_LINKER_TOOL,
    :test_fixture => DEFAULT_TEST_FIXTURE_TOOL,
  }
}

DEFAULT_TOOLS_TEST_PREPROCESSORS = {
  :tools => {
    :test_includes_preprocessor => DEFAULT_TEST_INCLUDES_PREPROCESSOR_TOOL,
    :test_file_preprocessor => DEFAULT_TEST_FILE_PREPROCESSOR_TOOL,
  }
}

DEFAULT_TOOLS_TEST_DEPENDENCIES = {
  :tools => {
    :test_dependencies_generator => DEFAULT_TEST_DEPENDENCIES_GENERATOR_TOOL,
  }
}

DEFAULT_TOOLS_RELEASE = {
  :tools => {
    :release_compiler => DEFAULT_RELEASE_COMPILER_TOOL,
    :release_linker => DEFAULT_RELEASE_LINKER_TOOL,
  }
}

DEFAULT_TOOLS_RELEASE_ASSEMBLER = {
  :tools => {
    :release_assembler => DEFAULT_RELEASE_ASSEMBLER_TOOL,
  }
}

DEFAULT_TOOLS_RELEASE_DEPENDENCIES = {
  :tools => {
    :release_dependencies_generator => DEFAULT_RELEASE_DEPENDENCIES_GENERATOR_TOOL,
  }
}

# Name given to the release artifact when the project doesn't specify one.
DEFAULT_RELEASE_TARGET_NAME = 'project'

# Baseline project configuration; user project files are merged on top.
DEFAULT_CEEDLING_CONFIG = {
  :project => {
    # :build_root must be set by user
    :use_exceptions => true,
    :use_mocks => true,
    :compile_threads => 1,
    :test_threads => 1,
    :use_test_preprocessor => false,
    :use_deep_dependencies => false,
    :generate_deep_dependencies => true, # only applicable if use_deep_dependencies is true
    :test_file_prefix => 'test_',
    :options_paths => [],
    :release_build => false,
  },

  :release_build => {
    # :output is set while building configuration -- allows smart default system-dependent file extension handling
    :use_assembly => false,
    :artifacts => [],
  },

  :paths => {
    :test => [], # must be populated by user
    :source => [], # must be populated by user
    :support => [],
    :include => [],
    :test_toolchain_include => [],
    :release_toolchain_include => [],
  },

  :files => {
    :test => [],
    :source => [],
    :assembly => [],
    :support => [],
    :include => [],
  },

  # unlike other top-level entries, environment's value is an array to preserve order
  :environment => [
    # when evaluated, this provides wider text field for rake task comments
    {:rake_columns => '120'},
  ],

  :defines => {
    :test => [],
    :test_preprocess => [],
    :release => [],
    :release_preprocess => [],
    :use_test_definition => false,
  },

  :libraries => {
    :test => [],
    :test_preprocess => [],
    :release => [],
    :release_preprocess => [],
  },

  :flags => {},

  # per-file-role filename extensions; executable extension is OS-dependent
  :extension => {
    :header => '.h',
    :source => '.c',
    :assembly => '.s',
    :object => '.o',
    :executable => ( SystemWrapper.windows? ? EXTENSION_WIN_EXE : EXTENSION_NONWIN_EXE ),
    :map => '.map',
    :list => '.lst',
    :testpass => '.pass',
    :testfail => '.fail',
    :dependencies => '.d',
  },

  :unity => {
    :vendor_path => CEEDLING_VENDOR,
    :defines => []
  },

  :cmock => {
    :vendor_path => CEEDLING_VENDOR,
    :defines => [],
    :includes => []
  },

  :cexception => {
    :vendor_path => CEEDLING_VENDOR,
    :defines => []
  },

  :test_runner => {
    :includes => [],
    :file_suffix => '_runner',
  },

  # all tools populated while building up config structure
  :tools => {},

  # empty argument lists for default tools
  # (these can be overridden in project file to add arguments to tools without totally redefining tools)
  :test_compiler => { :arguments => [] },
  :test_linker => { :arguments => [] },
  :test_fixture => {
    :arguments => [],
    :link_objects => [], # compiled object files to always be linked in (e.g. cmock.o if using mocks)
  },
  :test_includes_preprocessor => { :arguments => [] },
  :test_file_preprocessor => { :arguments => [] },
  :test_dependencies_generator => { :arguments => [] },
  :release_compiler => { :arguments => [] },
  :release_linker => { :arguments => [] },
  :release_assembler => { :arguments => [] },
  :release_dependencies_generator => { :arguments => [] },

  :plugins => {
    :load_paths => CEEDLING_PLUGINS,
    :enabled => [],
  }
}.freeze
# ERB template (with '%' line-directive trim mode) rendering the end-of-run
# test results report: captured stdout, ignored tests, failures, and an
# overall summary. Rendered with a `hash` of collated results in scope.
# NOTE: the %q string body below is runtime data -- do not edit its content.
DEFAULT_TESTS_RESULTS_REPORT_TEMPLATE = %q{
% ignored = hash[:results][:counts][:ignored]
% failed = hash[:results][:counts][:failed]
% stdout_count = hash[:results][:counts][:stdout]
% header_prepend = ((hash[:header].length > 0) ? "#{hash[:header]}: " : '')
% banner_width = 25 + header_prepend.length # widest message
% if (stdout_count > 0)
<%=@ceedling[:plugin_reportinator].generate_banner(header_prepend + 'TEST OUTPUT')%>
% hash[:results][:stdout].each do |string|
% string[:collection].each do |item|
<%=string[:source][:path]%><%=File::SEPARATOR%><%=string[:source][:file]%>: "<%=item%>"
% end
% end
% end
% if (ignored > 0)
<%=@ceedling[:plugin_reportinator].generate_banner(header_prepend + 'IGNORED TEST SUMMARY')%>
% hash[:results][:ignores].each do |ignore|
% ignore[:collection].each do |item|
<%=ignore[:source][:path]%><%=File::SEPARATOR%><%=ignore[:source][:file]%>:<%=item[:line]%>:<%=item[:test]%>
% if (item[:message].length > 0)
: "<%=item[:message]%>"
% else
<%="\n"%>
% end
% end
% end
% end
% if (failed > 0)
<%=@ceedling[:plugin_reportinator].generate_banner(header_prepend + 'FAILED TEST SUMMARY')%>
% hash[:results][:failures].each do |failure|
% failure[:collection].each do |item|
<%=failure[:source][:path]%><%=File::SEPARATOR%><%=failure[:source][:file]%>:<%=item[:line]%>:<%=item[:test]%>
% if (item[:message].length > 0)
: "<%=item[:message]%>"
% else
<%="\n"%>
% end
% end
% end
% end
% total_string = hash[:results][:counts][:total].to_s
% format_string = "%#{total_string.length}i"
<%=@ceedling[:plugin_reportinator].generate_banner(header_prepend + 'OVERALL TEST SUMMARY')%>
% if (hash[:results][:counts][:total] > 0)
TESTED: <%=hash[:results][:counts][:total].to_s%>
PASSED: <%=sprintf(format_string, hash[:results][:counts][:passed])%>
FAILED: <%=sprintf(format_string, failed)%>
IGNORED: <%=sprintf(format_string, ignored)%>
% else
No tests executed.
% end
}

View File

@@ -0,0 +1,98 @@
# Wires up Rake file-task dependency relationships for Ceedling's builds:
# loads generated deep-dependency (.d) files into rake and "enhances" file
# tasks with force-rebuild prerequisites when project configuration changed.
class Dependinator

  constructor :configurator, :project_config_manager, :test_includes_extractor, :file_path_utils, :rake_wrapper, :file_wrapper

  # Touch the force-rebuild marker files so tasks depending on them re-run.
  def touch_force_rebuild_files
    @file_wrapper.touch( @configurator.project_test_force_rebuild_filepath )
    @file_wrapper.touch( @configurator.project_release_force_rebuild_filepath ) if (@configurator.project_release_build)
  end



  # Load any existing release-side generated dependency files into rake.
  def load_release_object_deep_dependencies(dependencies_list)
    dependencies_list.each do |dependencies_file|
      # BUGFIX: File.exist? -- File.exists? was deprecated and removed in Ruby 3.2
      if File.exist?(dependencies_file)
        @rake_wrapper.load_dependencies( dependencies_file )
      end
    end
  end


  # Force release files to rebuild when the release config has changed.
  def enhance_release_file_dependencies(files)
    files.each do |filepath|
      @rake_wrapper[filepath].enhance( [@configurator.project_release_force_rebuild_filepath] ) if (@project_config_manager.release_config_changed)
    end
  end



  # Load any existing test-side generated dependency files into rake.
  def load_test_object_deep_dependencies(files_list)
    dependencies_list = @file_path_utils.form_test_dependencies_filelist(files_list)
    dependencies_list.each do |dependencies_file|
      # BUGFIX: File.exist? -- File.exists? was deprecated and removed in Ruby 3.2
      if File.exist?(dependencies_file)
        @rake_wrapper.load_dependencies(dependencies_file)
      end
    end
  end


  # Force the runner to rebuild when test config or defines have changed.
  def enhance_runner_dependencies(runner_filepath)
    @rake_wrapper[runner_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
      @project_config_manager.test_defines_changed)
  end


  # Force shallow include lists to rebuild when test config/defines changed.
  def enhance_shallow_include_lists_dependencies(include_lists)
    include_lists.each do |include_list_filepath|
      @rake_wrapper[include_list_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
        @project_config_manager.test_defines_changed)
    end
  end


  # Force preprocessed files to rebuild when test config/defines changed.
  # (Name's spelling kept as-is: it is part of the public interface.)
  def enhance_preprocesed_file_dependencies(files)
    files.each do |filepath|
      @rake_wrapper[filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
        @project_config_manager.test_defines_changed)
    end
  end


  # Force mocks to rebuild on config/defines changes or when the cmock
  # unity_helper files they depend on are configured.
  def enhance_mock_dependencies(mocks_list)
    # if input configuration or ceedling changes, make sure these guys get rebuilt
    mocks_list.each do |mock_filepath|
      @rake_wrapper[mock_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
        @project_config_manager.test_defines_changed)
      @rake_wrapper[mock_filepath].enhance( @configurator.cmock_unity_helper ) if (@configurator.cmock_unity_helper)
    end
  end


  # Force generated dependency files themselves to regenerate on changes.
  def enhance_dependencies_dependencies(dependencies)
    dependencies.each do |dependencies_filepath|
      @rake_wrapper[dependencies_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
        @project_config_manager.test_defines_changed)
    end
  end


  # Force test build objects to rebuild when test config/defines changed.
  def enhance_test_build_object_dependencies(objects)
    objects.each do |object_filepath|
      @rake_wrapper[object_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
        @project_config_manager.test_defines_changed)
    end
  end


  # Force test results to regenerate when test config/defines changed.
  def enhance_results_dependencies(result_filepath)
    @rake_wrapper[result_filepath].enhance( [@configurator.project_test_force_rebuild_filepath] ) if (@project_config_manager.test_config_changed ||
      @project_config_manager.test_defines_changed)
  end


  # Register a file task building the test executable from its objects.
  def setup_test_executable_dependencies(test, objects)
    @rake_wrapper.create_file_task( @file_path_utils.form_test_executable_filepath(test), objects )
  end

end

View File

@@ -0,0 +1,9 @@
require 'erb'
# Thin wrapper around ERB for rendering a template string out to a file.
class ErbWrapper
  # Render +template+ with +data+ visible to the template (via binding)
  # and write the result to +output_file+.
  def generate_file(template, data, output_file)
    File.open(output_file, "w") do |f|
      # trim_mode "<>" omits the newline for lines starting with <% and ending in %>.
      # BUGFIX: keyword form required -- the positional (safe_level, trim_mode)
      # signature of ERB.new was deprecated in Ruby 2.6 and removed in Ruby 3.1+.
      f << ERB.new(template, trim_mode: "<>").result(binding)
    end
  end
end

View File

@@ -0,0 +1,149 @@
require 'rubygems'
require 'rake' # for adding ext() method to string
require 'thread'
class FileFinder
SEMAPHORE = Mutex.new
constructor :configurator, :file_finder_helper, :cacheinator, :file_path_utils, :file_wrapper, :yaml_wrapper
# Cache the union of all test, source, and header files for later lookups
# in @all_test_source_and_header_file_collection.
def prepare_search_sources
  collections = [
    @configurator.collection_all_tests,
    @configurator.collection_all_source,
    @configurator.collection_all_headers,
  ]
  @all_test_source_and_header_file_collection = collections.reduce(:+)
end
# Map a mock filename back to the project header it mocks (strip the mock
# prefix, swap to the header extension) and locate it; errors if missing.
def find_header_file(mock_file)
  header_name = File.basename(mock_file).sub(/#{@configurator.cmock_mock_prefix}/, '').ext(@configurator.extension_header)
  @file_finder_helper.find_file_in_collection(header_name, @configurator.collection_all_headers, :error)
end
# Determine the actual input for generating a mock: the header itself, or
# its cached preprocessed form when the test preprocessor is enabled.
def find_header_input_for_mock_file(mock_file)
  header_path = find_header_file(mock_file)
  return header_path unless @configurator.project_use_test_preprocessor

  @cacheinator.diff_cached_test_file( @file_path_utils.form_preprocessed_file_filepath( header_path ) )
end
# Find the source file corresponding to a test file by stripping the test
# filename prefix; +complain+ governs behavior when no match is found.
def find_source_from_test(test, complain)
  source_name = File.basename(test).sub(/#{@configurator.project_test_file_prefix}/, '')

  # we don't blow up if a test file has no corresponding source file
  @file_finder_helper.find_file_in_collection(source_name, @configurator.collection_all_source, complain)
end
# Map a generated runner filepath back to its originating test file by
# dropping the runner suffix from the basename; errors if not found.
def find_test_from_runner_path(runner_path)
  ext = @configurator.extension_source
  # regex matches "<suffix>\.<ext>" (extension dot escaped) and collapses it to the bare extension
  test_name = File.basename(runner_path).sub(/#{@configurator.test_runner_file_suffix}#{'\\' + ext}/, ext)
  @file_finder_helper.find_file_in_collection(test_name, @configurator.collection_all_tests, :error)
end
# Determine the actual input for generating a runner: the test file itself,
# or its cached preprocessed form when the test preprocessor is enabled.
def find_test_input_for_runner_file(runner_path)
  test_path = find_test_from_runner_path(runner_path)
  return test_path unless @configurator.project_use_test_preprocessor

  @cacheinator.diff_cached_test_file( @file_path_utils.form_preprocessed_file_filepath( test_path ) )
end
# Locate the test file sharing the given file's base name (with the
# configured source extension); errors if not found.
def find_test_from_file_path(file_path)
  test_name = File.basename(file_path).ext(@configurator.extension_source)
  @file_finder_helper.find_file_in_collection(test_name, @configurator.collection_all_tests, :error)
end
# Look up a file of any kind (test, source, or header) by base name in the
# combined collection prepared by prepare_search_sources; errors if missing.
def find_test_or_source_or_header_file(file_path)
  basename = File.basename(file_path)
  @file_finder_helper.find_file_in_collection(basename, @all_test_source_and_header_file_collection, :error)
end
# Resolve the actual file to hand to the compiler for a given filepath.
# Resolution order: generated test runners, then generated mocks (when mocks
# are enabled), then the release or test compilation-input collections; when
# TEST_BUILD_USE_ASSEMBLY is defined and true, falls back to an assembly file.
#
# file_path -- path whose basename (with source extension) is searched for
# complain  -- :error / :warn / :ignore reaction when nothing is found
# release   -- search the release build's input collection instead of test's
def find_compilation_input_file(file_path, complain=:error, release=false)
found_file = nil
source_file = File.basename(file_path).ext(@configurator.extension_source)
# We only collect files that already exist when we start up.
# FileLists can produce undesired results for dynamically generated files depending on when they're accessed.
# So collect mocks and runners separately and right now.
SEMAPHORE.synchronize {
# generated test runner?
if (source_file =~ /#{@configurator.test_runner_file_suffix}/)
found_file =
@file_finder_helper.find_file_in_collection(
source_file,
@file_wrapper.directory_listing( File.join(@configurator.project_test_runners_path, '*') ),
complain)
# generated mock?
elsif (@configurator.project_use_mocks and (source_file =~ /#{@configurator.cmock_mock_prefix}/))
found_file =
@file_finder_helper.find_file_in_collection(
source_file,
@file_wrapper.directory_listing( File.join(@configurator.cmock_mock_path, '*') ),
complain)
elsif release
found_file =
@file_finder_helper.find_file_in_collection(
source_file,
@configurator.collection_release_existing_compilation_input,
complain)
else
# when assembly is in play, defer complaints until the assembly fallback below has been tried
temp_complain = (defined?(TEST_BUILD_USE_ASSEMBLY) && TEST_BUILD_USE_ASSEMBLY) ? :ignore : complain
found_file =
@file_finder_helper.find_file_in_collection(
source_file,
@configurator.collection_all_existing_compilation_input,
temp_complain)
# NOTE(review): `false` is passed as the complain argument here; since it is
# neither :error nor :warn the helper stays silent -- presumably intentional
found_file ||= find_assembly_file(file_path, false) if (defined?(TEST_BUILD_USE_ASSEMBLY) && TEST_BUILD_USE_ASSEMBLY)
end
}
return found_file
end
# Locate a source file matching the given path's basename (normalized to the
# project source extension); `complain` controls reaction when absent.
def find_source_file(file_path, complain)
  wanted = File.basename(file_path).ext(@configurator.extension_source)
  @file_finder_helper.find_file_in_collection(wanted, @configurator.collection_all_source, complain)
end
# Locate an assembly file matching the given path's basename (normalized to
# the project assembly extension).
def find_assembly_file(file_path, complain = :error)
  wanted = File.basename(file_path).ext(@configurator.extension_assembly)
  @file_finder_helper.find_file_in_collection(wanted, @configurator.collection_all_assembly, complain)
end
# Search an arbitrary caller-supplied file list for the given path.
def find_file_from_list(file_path, file_list, complain)
  @file_finder_helper.find_file_in_collection(file_path, file_list, complain)
end
end

View File

@@ -0,0 +1,54 @@
require 'fileutils'
require 'ceedling/constants' # for Verbosity enumeration
# Shared lookup helper: searches file collections by basename and reports
# misses through the streaminator at the caller-selected severity.
class FileFinderHelper

  constructor :streaminator

  # Search file_list for an entry whose basename exactly matches file_name.
  # A case-insensitive hit with different capitalization is treated as a hard
  # error. When nothing matches, `complain` selects the reaction:
  # :error raises, :warn logs a warning, anything else stays silent.
  def find_file_in_collection(file_name, file_list, complain, extra_message="")
    match = nil
    file_list.each do |candidate|
      # case insensitive comparison
      next unless File.basename(candidate).casecmp(file_name) == 0
      # case sensitive check
      if File.basename(candidate) == file_name
        match = candidate
        break
      end
      blow_up(file_name, "However, a filename having different capitalization was found: '#{candidate}'.")
    end

    if match.nil?
      case (complain)
      when :error then blow_up(file_name, extra_message)
      when :warn  then gripe(file_name, extra_message)
      #when :ignore then
      end
    end

    return match
  end

  private

  # Report a missing file at ERROR verbosity and abort via exception.
  def blow_up(file_name, extra_message="")
    error = "ERROR: Found no file '#{file_name}' in search paths."
    error += ' ' if (extra_message.length > 0)
    @streaminator.stderr_puts(error + extra_message, Verbosity::ERRORS)
    raise
  end

  # Report a missing file as a warning; execution continues.
  def gripe(file_name, extra_message="")
    warning = "WARNING: Found no file '#{file_name}' in search paths."
    warning += ' ' if (extra_message.length > 0)
    @streaminator.stderr_puts(warning + extra_message, Verbosity::COMPLAIN)
  end
end

View File

@@ -0,0 +1,200 @@
require 'rubygems'
require 'rake' # for ext()
require 'fileutils'
require 'ceedling/system_wrapper'
# global utility methods (for plugins, project files, etc.)
# Build a destination filepath from an original file's basename, optionally
# swapping in a new extension; backslashes in the destination are normalized
# to forward slashes.
def ceedling_form_filepath(destination_path, original_filepath, new_extension=nil)
  name = File.basename(original_filepath)
  name = name.ext(new_extension) unless new_extension.nil?
  File.join( destination_path.gsub(/\\/, '/'), name )
end
# Path-building utilities for Ceedling's build tree: class-level helpers for
# path normalization and glob handling, plus instance methods that map input
# files into their release/test build artifact locations (objects, lists,
# maps, dependencies, runners, preprocessed files, etc.) via configurator
# path accessors.
class FilePathUtils
# glob characters recognized when extracting a directory from a path+glob string
GLOB_MATCHER = /[\*\?\{\}\[\]]/
constructor :configurator, :file_wrapper
######### class methods ##########
# standardize path to use '/' path separator & have no trailing path separator
# note: mutates and returns its argument
def self.standardize(path)
path.strip!
path.gsub!(/\\/, '/')
path.chomp!('/')
return path
end
# append '.exe' to an executable name on Windows; pass through elsewhere
def self.os_executable_ext(executable)
return executable.ext('.exe') if SystemWrapper.windows?
return executable
end
# extract directory path from between optional add/subtract aggregation modifiers and up to glob specifiers
# note: slightly different than File.dirname in that /files/foo remains /files/foo and does not become /files
def self.extract_path(path)
path = path.sub(/^(\+|-):/, '')
# find first occurrence of path separator followed by directory glob specifier: *, ?, {, }, [, ]
find_index = (path =~ GLOB_MATCHER)
# no changes needed (lop off final path separator)
return path.chomp('/') if (find_index.nil?)
# extract up to first glob specifier
path = path[0..(find_index-1)]
# lop off everything up to and including final path separator
find_index = path.rindex('/')
return path[0..(find_index-1)] if (not find_index.nil?)
# return string up to first glob specifier if no path separator found
return path
end
# return whether the given path is to be aggregated (no aggregation modifier defaults to same as +:)
def self.add_path?(path)
return (path =~ /^-:/).nil?
end
# get path (and glob) lopping off optional +: / -: prefixed aggregation modifiers
def self.extract_path_no_aggregation_operators(path)
return path.sub(/^(\+|-):/, '')
end
# all the globs that may be in a path string work fine with one exception;
# to recurse through all subdirectories, the glob is dir/**/** but our paths use
# convention of only dir/**
def self.reform_glob(path)
return path if (path =~ /\/\*\*$/).nil?
return path + '/**'
end
######### instance methods ##########
# place an (optionally prefixed) copy of filepath's basename in the project temp directory
def form_temp_path(filepath, prefix='')
return File.join( @configurator.project_temp_path, prefix + File.basename(filepath) )
end
### release ###
# the form_release_* methods below map a source filepath into the release
# build tree, swapping in the appropriate artifact extension
def form_release_build_cache_path(filepath)
return File.join( @configurator.project_release_build_cache_path, File.basename(filepath) )
end
def form_release_dependencies_filepath(filepath)
return File.join( @configurator.project_release_dependencies_path, File.basename(filepath).ext(@configurator.extension_dependencies) )
end
def form_release_build_c_object_filepath(filepath)
return File.join( @configurator.project_release_build_output_c_path, File.basename(filepath).ext(@configurator.extension_object) )
end
def form_release_build_asm_object_filepath(filepath)
return File.join( @configurator.project_release_build_output_asm_path, File.basename(filepath).ext(@configurator.extension_object) )
end
def form_release_build_c_objects_filelist(files)
return (@file_wrapper.instantiate_file_list(files)).pathmap("#{@configurator.project_release_build_output_c_path}/%n#{@configurator.extension_object}")
end
def form_release_build_asm_objects_filelist(files)
return (@file_wrapper.instantiate_file_list(files)).pathmap("#{@configurator.project_release_build_output_asm_path}/%n#{@configurator.extension_object}")
end
def form_release_build_c_list_filepath(filepath)
return File.join( @configurator.project_release_build_output_c_path, File.basename(filepath).ext(@configurator.extension_list) )
end
def form_release_dependencies_filelist(files)
return (@file_wrapper.instantiate_file_list(files)).pathmap("#{@configurator.project_release_dependencies_path}/%n#{@configurator.extension_dependencies}")
end
### tests ###
# the form_test_* methods below map files into the test build tree
def form_test_build_cache_path(filepath)
return File.join( @configurator.project_test_build_cache_path, File.basename(filepath) )
end
def form_test_dependencies_filepath(filepath)
return File.join( @configurator.project_test_dependencies_path, File.basename(filepath).ext(@configurator.extension_dependencies) )
end
def form_pass_results_filepath(filepath)
return File.join( @configurator.project_test_results_path, File.basename(filepath).ext(@configurator.extension_testpass) )
end
def form_fail_results_filepath(filepath)
return File.join( @configurator.project_test_results_path, File.basename(filepath).ext(@configurator.extension_testfail) )
end
# test/foo.c -> <runners path>/foo<runner suffix>.c
def form_runner_filepath_from_test(filepath)
return File.join( @configurator.project_test_runners_path, File.basename(filepath, @configurator.extension_source)) + @configurator.test_runner_file_suffix + @configurator.extension_source
end
# NOTE(review): uses the global TEST_RUNNER_FILE_SUFFIX constant rather than
# @configurator.test_runner_file_suffix as the surrounding methods do -- verify
def form_test_filepath_from_runner(filepath)
return filepath.sub(/#{TEST_RUNNER_FILE_SUFFIX}/, '')
end
# insert the runner suffix just before the object extension of the test's object filepath
def form_runner_object_filepath_from_test(filepath)
return (form_test_build_c_object_filepath(filepath)).sub(/(#{@configurator.extension_object})$/, "#{@configurator.test_runner_file_suffix}\\1")
end
def form_test_build_c_object_filepath(filepath)
return File.join( @configurator.project_test_build_output_c_path, File.basename(filepath).ext(@configurator.extension_object) )
end
def form_test_build_asm_object_filepath(filepath)
return File.join( @configurator.project_test_build_output_asm_path, File.basename(filepath).ext(@configurator.extension_object) )
end
def form_test_executable_filepath(filepath)
return File.join( @configurator.project_test_build_output_path, File.basename(filepath).ext(@configurator.extension_executable) )
end
def form_test_build_map_filepath(filepath)
return File.join( @configurator.project_test_build_output_path, File.basename(filepath).ext(@configurator.extension_map) )
end
def form_test_build_list_filepath(filepath)
return File.join( @configurator.project_test_build_output_path, File.basename(filepath).ext(@configurator.extension_list) )
end
def form_preprocessed_file_filepath(filepath)
return File.join( @configurator.project_test_preprocess_files_path, File.basename(filepath) )
end
def form_preprocessed_includes_list_filepath(filepath)
return File.join( @configurator.project_test_preprocess_includes_path, File.basename(filepath) )
end
def form_test_build_objects_filelist(sources)
return (@file_wrapper.instantiate_file_list(sources)).pathmap("#{@configurator.project_test_build_output_c_path}/%n#{@configurator.extension_object}")
end
# map mock filenames back to the preprocessed header paths they derive from
# (mock prefix and extension stripped, header extension re-applied)
def form_preprocessed_mockable_headers_filelist(mocks)
list = @file_wrapper.instantiate_file_list(mocks)
headers = list.map do |file|
module_name = File.basename(file).sub(/^#{@configurator.cmock_mock_prefix}/, '').sub(/\.[a-zA-Z]+$/,'')
"#{@configurator.project_test_preprocess_files_path}/#{module_name}#{@configurator.extension_header}"
end
return headers
end
# map mock names to their generated source filepaths in the mock output directory
def form_mocks_source_filelist(mocks)
list = (@file_wrapper.instantiate_file_list(mocks))
sources = list.map{|file| "#{@configurator.cmock_mock_path}/#{file}#{@configurator.extension_source}"}
return sources
end
def form_test_dependencies_filelist(files)
list = @file_wrapper.instantiate_file_list(files)
return list.pathmap("#{@configurator.project_test_dependencies_path}/%n#{@configurator.extension_dependencies}")
end
def form_pass_results_filelist(path, files)
list = @file_wrapper.instantiate_file_list(files)
return list.pathmap("#{path}/%n#{@configurator.extension_testpass}")
end
end

View File

@@ -0,0 +1,69 @@
require 'rubygems'
require 'rake'
require 'set'
require 'fileutils'
require 'ceedling/file_path_utils'
class FileSystemUtils
constructor :file_wrapper
# build up path list from input of one or more strings or arrays of (+/-) paths & globs
def collect_paths(*paths)
raw = [] # all paths and globs
plus = Set.new # all paths to expand and add
minus = Set.new # all paths to remove from plus set
# assemble all globs and simple paths, reforming our glob notation to ruby globs
paths.each do |paths_container|
case (paths_container)
when String then raw << (FilePathUtils::reform_glob(paths_container))
when Array then paths_container.each {|path| raw << (FilePathUtils::reform_glob(path))}
else raise "Don't know how to handle #{paths_container.class}"
end
end
# iterate through each path and glob
raw.each do |path|
dirs = [] # container for only (expanded) paths
# if a glob, expand it and slurp up all non-file paths
if path.include?('*')
# grab base directory only if globs are snug up to final path separator
if (path =~ /\/\*+$/)
dirs << FilePathUtils.extract_path(path)
end
# grab expanded sub-directory globs
expanded = @file_wrapper.directory_listing( FilePathUtils.extract_path_no_aggregation_operators(path) )
expanded.each do |entry|
dirs << entry if @file_wrapper.directory?(entry)
end
# else just grab simple path
# note: we could just run this through glob expansion but such an
# approach doesn't handle a path not yet on disk)
else
dirs << FilePathUtils.extract_path_no_aggregation_operators(path)
end
# add dirs to the appropriate set based on path aggregation modifier if present
FilePathUtils.add_path?(path) ? plus.merge(dirs) : minus.merge(dirs)
end
return (plus - minus).to_a.uniq
end
# given a file list, add to it or remove from it
def revise_file_list(list, revisions)
revisions.each do |revision|
# include or exclude file or glob to file list
file = FilePathUtils.extract_path_no_aggregation_operators( revision )
FilePathUtils.add_path?(revision) ? list.include(file) : list.exclude(file)
end
end
end

View File

@@ -0,0 +1,10 @@
# Thin wrapper around FileUtils.cd so directory changes can be stubbed in tests.
class FileSystemWrapper
  # Change into `path`, yield, then restore the previous working directory.
  def cd(path)
    FileUtils.cd(path) { yield }
  end
end

View File

@@ -0,0 +1,83 @@
require 'rubygems'
require 'rake' # for FileList
require 'fileutils'
require 'ceedling/constants'
# Thin, stubbable wrapper around File / FileUtils / Dir operations so the
# rest of Ceedling never touches the filesystem directly.
class FileWrapper

  def get_expanded_path(path)
    return File.expand_path(path)
  end

  def basename(path, extension=nil)
    return File.basename(path, extension) if extension
    return File.basename(path)
  end

  # NULL_FILE_PATH (from ceedling/constants) is treated as always existing
  def exist?(filepath)
    return true if (filepath == NULL_FILE_PATH)
    return File.exist?(filepath)
  end

  def directory?(path)
    return File.directory?(path)
  end

  def dirname(path)
    return File.dirname(path)
  end

  def directory_listing(glob)
    return Dir.glob(glob, File::FNM_PATHNAME)
  end

  # options are splatted so they reach FileUtils as keywords (required on Ruby 3+)
  def rm_f(filepath, options={})
    FileUtils.rm_f(filepath, **options)
  end

  # bug fix: previously called FileUtils.rm_r(filepath, options={}), which
  # re-assigned options to an empty hash and silently dropped caller options
  # (e.g. :force); pass the caller's options through instead
  def rm_r(filepath, options={})
    FileUtils.rm_r(filepath, **options)
  end

  def cp(source, destination, options={})
    FileUtils.cp(source, destination, **options)
  end

  # true when both files have identical contents
  def compare(from, to)
    return FileUtils.compare_file(from, to)
  end

  # Open filepath with the given mode flags and yield the file handle.
  def open(filepath, flags)
    File.open(filepath, flags) do |file|
      yield(file)
    end
  end

  def read(filepath)
    return File.read(filepath)
  end

  def touch(filepath, options={})
    FileUtils.touch(filepath, **options)
  end

  # Write contents to filepath; flags defaults to 'w' (pass 'a' to append).
  def write(filepath, contents, flags='w')
    File.open(filepath, flags) do |file|
      file.write(contents)
    end
  end

  def readlines(filepath)
    return File.readlines(filepath)
  end

  # Wrap files in a Rake FileList for glob/pathmap convenience.
  def instantiate_file_list(files=[])
    return FileList.new(files)
  end

  # mkdir -p semantics: creates intermediate directories as needed
  def mkdir(folder)
    return FileUtils.mkdir_p(folder)
  end
end

View File

@@ -0,0 +1,74 @@
require 'rubygems'
require 'rake' # for ext()
require 'fileutils'
require 'ceedling/constants'
# :flags:
# :release:
# :compile:
# :'test_.+'
# - -pedantic # add '-pedantic' to every test file
# :*: # add '-foo' to compilation of all files not main.c
# - -foo
# :main: # add '-Wall' to compilation of main.c
# - -Wall
# :test:
# :link:
# :test_main: # add '--bar --baz' to linking of test_main.exe
# - --bar
# - --baz
# Split a hash into two hashes: entries satisfying the predicate, then the rest.
def partition(hash, &predicate)
  matched, unmatched = hash.partition(&predicate)
  return matched.to_h, unmatched.to_h
end
# Looks up per-file tool flags from the :flags section of the project
# configuration; supports literal filename keys, regex keys, and the
# :* / :'.*' wildcard keys.
class Flaginator

  constructor :configurator

  # Select the flags for file_name from hash, trying in order:
  # literal key, regex key, wildcard key; returns [] when nothing applies.
  def get_flag(hash, file_name)
    file_key = file_name.to_sym
    # 1. try literals
    literals, magic = partition(hash) { |k, v| k.to_s =~ /^\w+$/ }
    return literals[file_key] if literals.include?(file_key)
    any, regex = partition(magic) { |k, v| (k == :'*') || (k == :'.*') } # glob or regex wild card
    # 2. try regexes
    find_res = regex.find { |k, v| file_name =~ /^#{k.to_s}$/ }
    return find_res[1] if find_res
    # 3. try anything
    find_res = any.find { |k, v| file_name =~ /.*/ }
    return find_res[1] if find_res
    # 4. well, we've tried
    return []
  end

  # Fetch flags for a given operation (e.g. compile/link), build context, and
  # file; returns [] when no applicable configuration exists.
  def flag_down( operation, context, file )
    # create configurator accessor method
    accessor = ('flags_' + context.to_s).to_sym
    # create simple filename key from whatever filename provided
    # (removed an unused local `file_key` here -- get_flag derives its own key)
    file_name = File.basename( file ).ext('')
    # if no entry in configuration for flags for this context, bail out
    return [] if not @configurator.respond_to?( accessor )
    # get flags sub hash associated with this context
    flags = @configurator.send( accessor )
    # if operation not represented in flags hash, bail out
    return [] if not flags.include?( operation )
    # redefine flags to sub hash associated with the operation
    flags = flags[operation]
    return get_flag(flags, file_name)
  end
end

View File

@@ -0,0 +1,183 @@
require 'ceedling/constants'
# Orchestrates Ceedling's build-step generation: include lists, preprocessed
# files, dependency files, mocks, test runners, object files, executables, and
# test results. Each step is bracketed by plugin hooks and reports progress
# through the streaminator.
class Generator
constructor :configurator,
:generator_helper,
:preprocessinator,
:cmock_builder,
:generator_test_runner,
:generator_test_results,
:flaginator,
:test_includes_extractor,
:tool_executor,
:file_finder,
:file_path_utils,
:streaminator,
:plugin_manager,
:file_wrapper
# Produce the shallow #include list for a file via the preprocessinator.
def generate_shallow_includes_list(context, file)
@streaminator.stdout_puts("Generating include list for #{File.basename(file)}...", Verbosity::NORMAL)
@preprocessinator.preprocess_shallow_includes(file)
end
# Produce a preprocessed copy of a file via the preprocessinator.
def generate_preprocessed_file(context, file)
@streaminator.stdout_puts("Preprocessing #{File.basename(file)}...", Verbosity::NORMAL)
@preprocessinator.preprocess_file(file)
end
# Run the given dependencies tool to produce a dependencies file for source.
def generate_dependencies_file(tool, context, source, object, dependencies)
@streaminator.stdout_puts("Generating dependencies for #{File.basename(source)}...", Verbosity::NORMAL)
command =
@tool_executor.build_command_line(
tool,
[], # extra per-file command line parameters
source,
dependencies,
object)
@tool_executor.exec( command[:line], command[:options] )
end
# Generate a CMock mock for a header; pre/post plugin hooks always run
# (post via ensure, even when mock generation raises).
def generate_mock(context, header_filepath)
arg_hash = {:header_file => header_filepath, :context => context}
@plugin_manager.pre_mock_generate( arg_hash )
begin
@cmock_builder.cmock.setup_mocks( arg_hash[:header_file] )
rescue
raise
ensure
@plugin_manager.post_mock_generate( arg_hash )
end
end
# test_filepath may be either preprocessed test file or original test file
def generate_test_runner(context, test_filepath, runner_filepath)
arg_hash = {:context => context, :test_file => test_filepath, :runner_file => runner_filepath}
@plugin_manager.pre_runner_generate(arg_hash)
# collect info we need
module_name = File.basename(arg_hash[:test_file])
test_cases = @generator_test_runner.find_test_cases( @file_finder.find_test_from_runner_path(runner_filepath) )
mock_list = @test_includes_extractor.lookup_raw_mock_list(arg_hash[:test_file])
@streaminator.stdout_puts("Generating runner for #{module_name}...", Verbosity::NORMAL)
test_file_includes = [] # Empty list for now, since apparently unused
# build runner file
begin
@generator_test_runner.generate(module_name, runner_filepath, test_cases, mock_list, test_file_includes)
rescue
raise
ensure
@plugin_manager.post_runner_generate(arg_hash)
end
end
# Compile (or assemble) source into object; per-file flags come from the
# flaginator. The shell result is stashed into arg_hash for the post-compile
# plugin hook even when the tool fails.
def generate_object_file(tool, operation, context, source, object, list='', dependencies='')
shell_result = {}
arg_hash = {:tool => tool, :operation => operation, :context => context, :source => source, :object => object, :list => list, :dependencies => dependencies}
@plugin_manager.pre_compile_execute(arg_hash)
@streaminator.stdout_puts("Compiling #{File.basename(arg_hash[:source])}...", Verbosity::NORMAL)
command =
@tool_executor.build_command_line( arg_hash[:tool],
@flaginator.flag_down( operation, context, source ),
arg_hash[:source],
arg_hash[:object],
arg_hash[:list],
arg_hash[:dependencies])
@streaminator.stdout_puts("Command: #{command}", Verbosity::DEBUG)
begin
shell_result = @tool_executor.exec( command[:line], command[:options] )
rescue ShellExecutionException => ex
shell_result = ex.shell_result
raise ex
ensure
arg_hash[:shell_result] = shell_result
@plugin_manager.post_compile_execute(arg_hash)
end
end
# Link objects (plus libraries) into an executable; on link failure a hint
# about common missing-symbol causes is printed before re-raising.
def generate_executable_file(tool, context, objects, executable, map='', libraries=[])
shell_result = {}
arg_hash = { :tool => tool,
:context => context,
:objects => objects,
:executable => executable,
:map => map,
:libraries => libraries
}
@plugin_manager.pre_link_execute(arg_hash)
@streaminator.stdout_puts("Linking #{File.basename(arg_hash[:executable])}...", Verbosity::NORMAL)
command =
@tool_executor.build_command_line( arg_hash[:tool],
@flaginator.flag_down( OPERATION_LINK_SYM, context, executable ),
arg_hash[:objects],
arg_hash[:executable],
arg_hash[:map],
arg_hash[:libraries]
)
@streaminator.stdout_puts("Command: #{command}", Verbosity::DEBUG)
begin
shell_result = @tool_executor.exec( command[:line], command[:options] )
rescue ShellExecutionException => ex
notice = "\n" +
"NOTICE: If the linker reports missing symbols, the following may be to blame:\n" +
" 1. Test lacks #include statements corresponding to needed source files.\n" +
" 2. Project search paths do not contain source files corresponding to #include statements in the test.\n"
if (@configurator.project_use_mocks)
notice += " 3. Test does not #include needed mocks.\n\n"
else
notice += "\n"
end
@streaminator.stderr_puts(notice, Verbosity::COMPLAIN)
shell_result = ex.shell_result
# re-raise with an empty message; the notice above already explained the failure
raise ''
ensure
arg_hash[:shell_result] = shell_result
@plugin_manager.post_link_execute(arg_hash)
end
end
# Run a test executable and convert its stdout into a results file.
def generate_test_results(tool, context, executable, result)
arg_hash = {:tool => tool, :context => context, :executable => executable, :result_file => result}
@plugin_manager.pre_test_fixture_execute(arg_hash)
@streaminator.stdout_puts("Running #{File.basename(arg_hash[:executable])}...", Verbosity::NORMAL)
# Unity's exit code is equivalent to the number of failed tests, so we tell @tool_executor not to fail out if there are failures
# so that we can run all tests and collect all results
command = @tool_executor.build_command_line(arg_hash[:tool], [], arg_hash[:executable])
@streaminator.stdout_puts("Command: #{command}", Verbosity::DEBUG)
command[:options][:boom] = false
shell_result = @tool_executor.exec( command[:line], command[:options] )
#Don't Let The Failure Count Make Us Believe Things Aren't Working
shell_result[:exit_code] = 0
@generator_helper.test_results_error_handler(executable, shell_result)
processed = @generator_test_results.process_and_write_results( shell_result,
arg_hash[:result_file],
@file_finder.find_test_from_file_path(arg_hash[:executable]) )
arg_hash[:result_file] = processed[:result_file]
# NOTE(review): process_and_write_results returns key :result (singular), so
# processed[:results] here looks like it is always nil -- verify against plugins
arg_hash[:results] = processed[:results]
arg_hash[:shell_result] = shell_result # for raw output display if no plugins for formatted display
@plugin_manager.post_test_fixture_execute(arg_hash)
end
end

View File

@@ -0,0 +1,40 @@
require 'ceedling/constants'
class GeneratorHelper
constructor :streaminator
def test_results_error_handler(executable, shell_result)
notice = ''
error = false
if (shell_result[:output].nil? or shell_result[:output].strip.empty?)
error = true
# mirror style of generic tool_executor failure output
notice = "\n" +
"ERROR: Test executable \"#{File.basename(executable)}\" failed.\n" +
"> Produced no output to $stdout.\n"
elsif ((shell_result[:output] =~ TEST_STDOUT_STATISTICS_PATTERN).nil?)
error = true
# mirror style of generic tool_executor failure output
notice = "\n" +
"ERROR: Test executable \"#{File.basename(executable)}\" failed.\n" +
"> Produced no final test result counts in $stdout:\n" +
"#{shell_result[:output].strip}\n"
end
if (error)
# since we told the tool executor to ignore the exit code, handle it explicitly here
notice += "> And exited with status: [#{shell_result[:exit_code]}] (count of failed tests).\n" if (shell_result[:exit_code] != nil)
notice += "> And then likely crashed.\n" if (shell_result[:exit_code] == nil)
notice += "> This is often a symptom of a bad memory access in source or test code.\n\n"
@streaminator.stderr_puts(notice, Verbosity::COMPLAIN)
raise
end
end
end

View File

@@ -0,0 +1,89 @@
require 'rubygems'
require 'rake' # for .ext()
require 'ceedling/constants'
class GeneratorTestResults
constructor :configurator, :generator_test_results_sanity_checker, :yaml_wrapper
def process_and_write_results(unity_shell_result, results_file, test_file)
output_file = results_file
results = get_results_structure
results[:source][:path] = File.dirname(test_file)
results[:source][:file] = File.basename(test_file)
results[:time] = unity_shell_result[:time] unless unity_shell_result[:time].nil?
# process test statistics
if (unity_shell_result[:output] =~ TEST_STDOUT_STATISTICS_PATTERN)
results[:counts][:total] = $1.to_i
results[:counts][:failed] = $2.to_i
results[:counts][:ignored] = $3.to_i
results[:counts][:passed] = (results[:counts][:total] - results[:counts][:failed] - results[:counts][:ignored])
end
# remove test statistics lines
output_string = unity_shell_result[:output].sub(TEST_STDOUT_STATISTICS_PATTERN, '')
output_string.lines do |line|
# process unity output
case line
when /(:IGNORE)/
elements = extract_line_elements(line, results[:source][:file])
results[:ignores] << elements[0]
results[:stdout] << elements[1] if (!elements[1].nil?)
when /(:PASS$)/
elements = extract_line_elements(line, results[:source][:file])
results[:successes] << elements[0]
results[:stdout] << elements[1] if (!elements[1].nil?)
when /(:FAIL)/
elements = extract_line_elements(line, results[:source][:file])
results[:failures] << elements[0]
results[:stdout] << elements[1] if (!elements[1].nil?)
else # collect up all other
results[:stdout] << line.chomp
end
end
@generator_test_results_sanity_checker.verify(results, unity_shell_result[:exit_code])
output_file = results_file.ext(@configurator.extension_testfail) if (results[:counts][:failed] > 0)
@yaml_wrapper.dump(output_file, results)
return { :result_file => output_file, :result => results }
end
private
def get_results_structure
return {
:source => {:path => '', :file => ''},
:successes => [],
:failures => [],
:ignores => [],
:counts => {:total => 0, :passed => 0, :failed => 0, :ignored => 0},
:stdout => [],
:time => 0.0
}
end
def extract_line_elements(line, filename)
# handle anything preceding filename in line as extra output to be collected
stdout = nil
stdout_regex = /(.+)#{Regexp.escape(filename)}.+/i
if (line =~ stdout_regex)
stdout = $1.clone
line.sub!(/#{Regexp.escape(stdout)}/, '')
end
# collect up test results minus and extra output
elements = (line.strip.split(':'))[1..-1]
return {:test => elements[1], :line => elements[0].to_i, :message => (elements[3..-1].join(':')).strip}, stdout if elements.size >= 3
return {:test => '???', :line => -1, :message => nil} #fallback safe option. TODO better handling
end
end

View File

@@ -0,0 +1,65 @@
require 'rubygems'
require 'rake' # for ext() method
require 'ceedling/constants'
class GeneratorTestResultsSanityChecker
constructor :configurator, :streaminator
def verify(results, unity_exit_code)
# do no sanity checking if it's disabled
return if (@configurator.sanity_checks == TestResultsSanityChecks::NONE)
raise "results nil or empty" if results.nil? || results.empty?
ceedling_ignores_count = results[:ignores].size
ceedling_failures_count = results[:failures].size
ceedling_tests_summation = (ceedling_ignores_count + ceedling_failures_count + results[:successes].size)
# Exit code handling is not a sanity check that can always be performed because
# command line simulators may or may not pass through Unity's exit code
if (@configurator.sanity_checks >= TestResultsSanityChecks::THOROUGH)
# many platforms limit exit codes to a maximum of 255
if ((ceedling_failures_count != unity_exit_code) and (unity_exit_code < 255))
sanity_check_warning(results[:source][:file], "Unity's exit code (#{unity_exit_code}) does not match Ceedling's summation of failed test cases (#{ceedling_failures_count}).")
end
if ((ceedling_failures_count < 255) and (unity_exit_code == 255))
sanity_check_warning(results[:source][:file], "Ceedling's summation of failed test cases (#{ceedling_failures_count}) is less than Unity's exit code (255 or more).")
end
end
if (ceedling_ignores_count != results[:counts][:ignored])
sanity_check_warning(results[:source][:file], "Unity's final ignore count (#{results[:counts][:ignored]}) does not match Ceedling's summation of ignored test cases (#{ceedling_ignores_count}).")
end
if (ceedling_failures_count != results[:counts][:failed])
sanity_check_warning(results[:source][:file], "Unity's final fail count (#{results[:counts][:failed]}) does not match Ceedling's summation of failed test cases (#{ceedling_failures_count}).")
end
if (ceedling_tests_summation != results[:counts][:total])
sanity_check_warning(results[:source][:file], "Unity's final test count (#{results[:counts][:total]}) does not match Ceedling's summation of all test cases (#{ceedling_tests_summation}).")
end
end
private
def sanity_check_warning(file, message)
unless defined?(CEEDLING_IGNORE_SANITY_CHECK)
notice = "\n" +
"ERROR: Internal sanity check for test fixture '#{file.ext(@configurator.extension_executable)}' finds that #{message}\n" +
" Possible causes:\n" +
" 1. Your test + source dereferenced a null pointer.\n" +
" 2. Your test + source indexed past the end of a buffer.\n" +
" 3. Your test + source committed a memory access violation.\n" +
" 4. Your test fixture produced an exit code of 0 despite execution ending prematurely.\n" +
" Sanity check failures of test results are usually a symptom of interrupted test execution.\n\n"
@streaminator.stderr_puts( notice )
raise
end
end
end

View File

@@ -0,0 +1,56 @@
# Discovers Unity test cases within test files and generates the
# corresponding runner source files via Unity's test runner generator.
class GeneratorTestRunner

  constructor :configurator, :file_path_utils, :file_wrapper

  # Return the test cases found in test_file as an array of hashes (with
  # :test name and :line_number keys). When the test preprocessor is in use,
  # tests are discovered in the preprocessed file but line numbers are mapped
  # back to the original source file.
  def find_test_cases(test_file)
    # Pull in Unity's Test Runner Generator
    require 'generate_test_runner.rb'
    @test_runner_generator ||= UnityTestRunnerGenerator.new( @configurator.get_runner_config )

    if (@configurator.project_use_test_preprocessor)
      # redirect to use the preprocessor file if we're doing that sort of thing
      pre_test_file = @file_path_utils.form_preprocessed_file_filepath(test_file)

      # actually look for the tests using Unity's test runner generator
      contents = @file_wrapper.read(pre_test_file)
      tests_and_line_numbers = @test_runner_generator.find_tests(contents)
      @test_runner_generator.find_setup_and_teardown(contents)

      # look up the line numbers in the original file
      # (scan forward from the previous match since tests appear in order)
      source_lines = @file_wrapper.read(test_file).split("\n")
      source_index = 0  # removed stray trailing semicolon
      tests_and_line_numbers.size.times do |i|
        source_lines[source_index..-1].each_with_index do |line, index|
          if (line =~ /#{tests_and_line_numbers[i][:test]}/)
            source_index += index
            tests_and_line_numbers[i][:line_number] = source_index + 1
            break
          end
        end
      end
    else
      # Just look for the tests using Unity's test runner generator
      contents = @file_wrapper.read(test_file)
      tests_and_line_numbers = @test_runner_generator.find_tests(contents)
      @test_runner_generator.find_setup_and_teardown(contents)
    end

    return tests_and_line_numbers
  end

  # Write a runner file for the given module and pre-discovered test cases.
  def generate(module_name, runner_filepath, test_cases, mock_list, test_file_includes=[])
    require 'generate_test_runner.rb'

    # actually build the test runner using Unity's test runner generator
    # (there is no need to use preprocessor here because we've already looked up test cases and are passing them in here)
    @test_runner_generator ||= UnityTestRunnerGenerator.new( @configurator.get_runner_config )
    @test_runner_generator.generate( module_name,
                                     runner_filepath,
                                     test_cases,
                                     mock_list,
                                     test_file_includes)
  end
end

View File

@@ -0,0 +1,31 @@
# Project-wide build log: derives the log filename from every configuration
# file in play and appends timestamped entries when logging is enabled.
class Loginator

  constructor :configurator, :project_file_loader, :project_config_manager, :file_wrapper, :system_wrapper

  # Compose @project_log_filepath from the names (sans extension) of the main,
  # user, and options configuration files, joined with underscores.
  def setup_log_filepath
    config_files = [
      @project_file_loader.main_file,
      @project_file_loader.user_file
    ]
    config_files.concat( @project_config_manager.options_files )
    config_files.compact!
    config_files.map! { |file| file.ext('') }
    log_name = config_files.join( '_' )
    @project_log_filepath = File.join( @configurator.project_log_path, log_name.ext('.log') )
  end

  # Append a timestamped (optionally headed) entry to the log; no-op unless
  # project logging is enabled.
  def log(string, heading=nil)
    return unless @configurator.project_logging
    entry = "\n[#{@system_wrapper.time_now}]"
    entry += " :: #{heading}" unless heading.nil?
    entry += "\n#{string.strip}\n"
    @file_wrapper.write(@project_log_filepath, entry, 'a')
  end
end

View File

@@ -0,0 +1,46 @@
# modified version of Rake's provided make-style dependency loader
# customizations:
#  (1) handles windows drives in paths -- colons don't confuse task demarcation
#  (2) handles spaces in directory paths
module Rake

  # Makefile loader to be used with the import file loader.
  class MakefileLoader
    # bring in the `file` task-definition DSL method (modern rake no longer
    # mixes DSL methods into every object)
    include Rake::DSL

    # Load the makefile dependencies in +fn+.
    def load(fn)
      # File.open (not Kernel#open) so a filename beginning with '|' can never
      # be interpreted as a command to execute
      File.open(fn) do |mf|
        lines = mf.read
        lines.gsub!(/#[^\n]*\n/m, "") # remove comments
        lines.gsub!(/\\\n/, ' ')      # string together line continuations into single line
        lines.split("\n").each do |line|
          process_line(line)
        end
      end
    end

    private

    # Process one logical line of makefile data.
    def process_line(line)
      # split on presence of task demaractor followed by space (i.e don't get confused by a colon in a win path)
      file_tasks, args = line.split(/:\s/)
      return if args.nil?

      # split at non-escaped space boundary between files (i.e. escaped spaces in paths are left alone)
      dependents = args.split(/\b\s+/)
      # replace escaped spaces and clean up any extra whitespace
      dependents.map! { |path| path.gsub(/\\ /, ' ').strip }

      # register each named target as a rake file task depending on the list
      file_tasks.strip.split.each do |file_task|
        file file_task => dependents
      end
    end
  end

  # Install the handler
  Rake.application.add_loader('mf', MakefileLoader.new)
end

View File

@@ -0,0 +1,310 @@
file_wrapper:
file_system_wrapper:
stream_wrapper:
rake_wrapper:
yaml_wrapper:
system_wrapper:
cmock_builder:
reportinator:
rake_utils:
compose:
- rake_wrapper
system_utils:
compose:
- system_wrapper
file_path_utils:
compose:
- configurator
- file_wrapper
file_system_utils:
compose: file_wrapper
project_file_loader:
compose:
- yaml_wrapper
- stream_wrapper
- system_wrapper
- file_wrapper
project_config_manager:
compose:
- cacheinator
- configurator
- yaml_wrapper
- file_wrapper
cacheinator:
compose:
- cacheinator_helper
- file_path_utils
- file_wrapper
- yaml_wrapper
cacheinator_helper:
compose:
- file_wrapper
- yaml_wrapper
tool_executor:
compose:
- configurator
- tool_executor_helper
- streaminator
- system_wrapper
tool_executor_helper:
compose:
- streaminator
- system_utils
- system_wrapper
configurator:
compose:
- configurator_setup
- configurator_plugins
- configurator_builder
- cmock_builder
- yaml_wrapper
- system_wrapper
configurator_setup:
compose:
- configurator_builder
- configurator_validator
- configurator_plugins
- stream_wrapper
configurator_plugins:
compose:
- stream_wrapper
- file_wrapper
- system_wrapper
configurator_validator:
compose:
- file_wrapper
- stream_wrapper
- system_wrapper
configurator_builder:
compose:
- file_system_utils
- file_wrapper
- system_wrapper
loginator:
compose:
- configurator
- project_file_loader
- project_config_manager
- file_wrapper
- system_wrapper
streaminator:
compose:
- streaminator_helper
- verbosinator
- loginator
- stream_wrapper
streaminator_helper:
setupinator:
plugin_builder:
plugin_manager:
compose:
- configurator
- plugin_manager_helper
- streaminator
- reportinator
- system_wrapper
plugin_manager_helper:
plugin_reportinator:
compose:
- plugin_reportinator_helper
- plugin_manager
- reportinator
plugin_reportinator_helper:
compose:
- configurator
- streaminator
- yaml_wrapper
- file_wrapper
verbosinator:
compose: configurator
file_finder:
compose:
- configurator
- file_finder_helper
- cacheinator
- file_path_utils
- file_wrapper
- yaml_wrapper
file_finder_helper:
compose: streaminator
test_includes_extractor:
compose:
- configurator
- yaml_wrapper
- file_wrapper
task_invoker:
compose:
- dependinator
- rake_utils
- rake_wrapper
- project_config_manager
flaginator:
compose:
- configurator
generator:
compose:
- configurator
- generator_helper
- preprocessinator
- cmock_builder
- generator_test_runner
- generator_test_results
- flaginator
- test_includes_extractor
- tool_executor
- file_finder
- file_path_utils
- streaminator
- plugin_manager
- file_wrapper
generator_helper:
compose:
- streaminator
generator_test_results:
compose:
- configurator
- generator_test_results_sanity_checker
- yaml_wrapper
generator_test_results_sanity_checker:
compose:
- configurator
- streaminator
generator_test_runner:
compose:
- configurator
- file_path_utils
- file_wrapper
dependinator:
compose:
- configurator
- project_config_manager
- test_includes_extractor
- file_path_utils
- rake_wrapper
- file_wrapper
preprocessinator:
compose:
- preprocessinator_helper
- preprocessinator_includes_handler
- preprocessinator_file_handler
- task_invoker
- file_path_utils
- yaml_wrapper
preprocessinator_helper:
compose:
- configurator
- test_includes_extractor
- task_invoker
- file_finder
- file_path_utils
preprocessinator_includes_handler:
compose:
- configurator
- tool_executor
- task_invoker
- file_path_utils
- yaml_wrapper
- file_wrapper
preprocessinator_file_handler:
compose:
- preprocessinator_extractor
- configurator
- tool_executor
- file_path_utils
- file_wrapper
preprocessinator_extractor:
test_invoker:
compose:
- configurator
- test_invoker_helper
- plugin_manager
- streaminator
- preprocessinator
- task_invoker
- dependinator
- project_config_manager
- build_invoker_utils
- file_path_utils
- file_wrapper
test_invoker_helper:
compose:
- configurator
- task_invoker
- test_includes_extractor
- file_finder
- file_path_utils
- file_wrapper
release_invoker:
compose:
- configurator
- release_invoker_helper
- build_invoker_utils
- dependinator
- task_invoker
- file_path_utils
- file_wrapper
release_invoker_helper:
compose:
- configurator
- dependinator
- task_invoker
build_invoker_utils:
compose:
- configurator
- streaminator
erb_wrapper:

View File

@@ -0,0 +1,19 @@
# Run the given block over every element of +things+ using +n+ worker threads.
# Elements are drained from a shared queue, so processing order is undefined.
# Returns after all workers have finished; exceptions raised by the block
# propagate out of the join.
def par_map(n, things, &block)
  queue = Queue.new
  things.each { |thing| queue << thing }

  threads = Array.new(n) do
    Thread.new do
      loop do
        item = begin
          # non-blocking pop raises ThreadError once the queue is drained;
          # rescuing only here (not around the yield) means a ThreadError
          # raised by the caller's block is no longer silently swallowed
          queue.pop(true)
        rescue ThreadError
          break # queue empty: this worker is done
        end
        yield item
      end
    end
  end

  threads.each(&:join)
end

View File

@@ -0,0 +1,80 @@
class String

  # Re-indent a multiline string to the requested number of left-margin
  # whitespace columns; handy when formatting heredocs. The indent width to
  # strip is taken from the first line containing non-whitespace. Lines too
  # short to carry that indent collapse to a bare newline.
  def left_margin(margin=0)
    indent_width = 0

    # find first line with non-whitespace and count left columns of whitespace
    each_line do |line|
      if line =~ /^\s*\S/
        indent_width = $&.length - 1
        break
      end
    end

    pad = ' ' * margin

    # strip the measured indent from every line and prepend the new margin
    reformatted = each_line.map do |line|
      # special case: line narrower than the width to be lopped off
      line.length > indent_width ? "#{pad}#{line[indent_width..-1]}" : "\n"
    end

    reformatted.join
  end

end
# Base class for Ceedling plugins. Subclasses override any subset of the
# hook methods below; PluginManager invokes each hook on every registered
# plugin at the corresponding point in the build. All hooks are no-ops here.
class Plugin
  # name: plugin name as registered; environment: array of environment
  # settings the plugin contributes (collected by PluginManager)
  attr_reader :name, :environment
  # hash of all constructed plugin objects, injected after construction
  attr_accessor :plugin_objects
  # system_objects: hash of all Ceedling system objects, kept for subclass use
  def initialize(system_objects, name)
    @environment = []
    @ceedling = system_objects
    @name = name
    self.setup
  end
  # subclass-overridable initialization hook, called from initialize
  def setup; end
  # mock generation
  def pre_mock_generate(arg_hash); end
  def post_mock_generate(arg_hash); end
  # test runner generation
  def pre_runner_generate(arg_hash); end
  def post_runner_generate(arg_hash); end
  # compilation (test or source)
  def pre_compile_execute(arg_hash); end
  def post_compile_execute(arg_hash); end
  # linking (test or source)
  def pre_link_execute(arg_hash); end
  def post_link_execute(arg_hash); end
  # test fixture execution
  def pre_test_fixture_execute(arg_hash); end
  def post_test_fixture_execute(arg_hash); end
  # test task
  def pre_test(test); end
  def post_test(test); end
  # release task
  def pre_release; end
  def post_release; end
  # whole shebang (any use of Ceedling)
  def pre_build; end
  def post_build; end
  # hook for plugins to print an end-of-build summary
  def summary; end
end

View File

@@ -0,0 +1,53 @@
require 'ceedling/plugin'
class PluginBuilder

  # hash of constructed plugin objects keyed by underscored symbol name
  attr_accessor :plugin_objects

  # Build all objects declared in a plugin's object map: a DIY-style YAML
  # hash of object name => {'compose' => [dependency names]}. Dependencies
  # are constructed depth-first before the objects that compose them.
  # Raises if the object map YAML is missing/nil. Returns the object hash.
  def construct_plugin(plugin_name, object_map_yaml, system_objects)
    @plugin_objects = {}
    @system_objects = system_objects

    if object_map_yaml
      @object_map = YAML.load(object_map_yaml)
      @object_map.each_key { |obj| construct_object(obj) }
    else
      raise "Invalid object map for plugin #{plugin_name}!"
    end

    return @plugin_objects
  end

  private

  # convert an underscored_name into a CamelCase class name
  def camelize(underscored_name)
    return underscored_name.gsub(/(_|^)([a-z0-9])/) { $2.upcase }
  end

  # depth-first construction: build an object's 'compose' dependencies first
  def construct_object(obj)
    return unless @plugin_objects[obj].nil?
    if @object_map[obj] && @object_map[obj]['compose']
      @object_map[obj]['compose'].each { |dep| construct_object(dep) }
    end
    build_object(obj)
  end

  # require the object's source file, resolve its class, and instantiate it
  def build_object(new_object)
    if @plugin_objects[new_object.to_sym].nil?
      require new_object
      class_name = camelize(new_object)
      # const_get instead of eval: identical constant lookup, no string execution
      new_instance = Object.const_get(class_name).new(@system_objects, class_name.to_s)
      new_instance.plugin_objects = @plugin_objects
      @plugin_objects[new_object.to_sym] = new_instance
    end
  end
end

View File

@@ -0,0 +1,107 @@
require 'ceedling/constants'
class PluginManager

  constructor :configurator, :plugin_manager_helper, :streaminator, :reportinator, :system_wrapper

  def setup
    @build_fail_registry = []
    @plugin_objects = [] # an array (not a hash) so hooks fire in registration order
  end

  # Require, instantiate, and register each scripted plugin. Yields an
  # aggregate { :environment => [...] } hash if any plugin contributes
  # environment settings.
  def load_plugin_scripts(script_plugins, system_objects)
    env_settings = []

    script_plugins.each do |plugin|
      # protect against instantiating object multiple times due to processing config multiple times (option files, etc)
      next if @plugin_manager_helper.include?(@plugin_objects, plugin)
      begin
        @system_wrapper.require_file( "#{plugin}.rb" )
        instance = @plugin_manager_helper.instantiate_plugin_script( camelize(plugin), system_objects, plugin )
        @plugin_objects << instance
        env_settings += instance.environment
        # add plugins to hash of all system objects
        system_objects[plugin.downcase.to_sym] = instance
      rescue
        puts "Exception raised while trying to load plugin: #{plugin}"
        raise
      end
    end

    yield( { :environment => env_settings } ) unless env_settings.empty?
  end

  # true if any plugin has registered a build failure
  def plugins_failed?
    !@build_fail_registry.empty?
  end

  # print a banner plus every registered failure message to stderr
  def print_plugin_failures
    return if @build_fail_registry.empty?

    report = @reportinator.generate_banner('BUILD FAILURE SUMMARY')
    bullet = (@build_fail_registry.size > 1) ? ' - ' : ''
    @build_fail_registry.each { |failure| report += "#{bullet}#{failure}\n" }
    report += "\n"

    @streaminator.stderr_puts(report, Verbosity::ERRORS)
  end

  # record a failure message (ignored when nil or empty)
  def register_build_failure(message)
    @build_fail_registry << message if (message and not message.empty?)
  end

  #### execute all plugin methods ####

  def pre_mock_generate(arg_hash); execute_plugins(:pre_mock_generate, arg_hash); end
  def post_mock_generate(arg_hash); execute_plugins(:post_mock_generate, arg_hash); end

  def pre_runner_generate(arg_hash); execute_plugins(:pre_runner_generate, arg_hash); end
  def post_runner_generate(arg_hash); execute_plugins(:post_runner_generate, arg_hash); end

  def pre_compile_execute(arg_hash); execute_plugins(:pre_compile_execute, arg_hash); end
  def post_compile_execute(arg_hash); execute_plugins(:post_compile_execute, arg_hash); end

  def pre_link_execute(arg_hash); execute_plugins(:pre_link_execute, arg_hash); end
  def post_link_execute(arg_hash); execute_plugins(:post_link_execute, arg_hash); end

  def pre_test_fixture_execute(arg_hash); execute_plugins(:pre_test_fixture_execute, arg_hash); end
  def post_test_fixture_execute(arg_hash)
    # special arbitration: raw test results are printed or taken over by plugins handling the job
    @streaminator.stdout_puts(arg_hash[:shell_result][:output]) if (@configurator.plugins_display_raw_test_results)
    execute_plugins(:post_test_fixture_execute, arg_hash)
  end

  def pre_test(test); execute_plugins(:pre_test, test); end
  def post_test(test); execute_plugins(:post_test, test); end

  def pre_release; execute_plugins(:pre_release); end
  def post_release; execute_plugins(:post_release); end

  def pre_build; execute_plugins(:pre_build); end
  def post_build; execute_plugins(:post_build); end
  def post_error; execute_plugins(:post_error); end

  def summary; execute_plugins(:summary); end

  private ####################################

  # convert an underscored_name into a CamelCase class name
  def camelize(underscored_name)
    underscored_name.gsub(/(_|^)([a-z0-9])/) { $2.upcase }
  end

  # invoke +method+ on every plugin that implements it, in registration order
  def execute_plugins(method, *args)
    @plugin_objects.each do |plugin|
      begin
        plugin.send(method, *args) if plugin.respond_to?(method)
      rescue
        puts "Exception raised in plugin: #{plugin.name}, in method #{method}"
        raise
      end
    end
  end

end

View File

@@ -0,0 +1,19 @@
class PluginManagerHelper

  # True if +plugins+ already contains a plugin whose name is +name+.
  def include?(plugins, name)
    return plugins.any? { |plugin| plugin.name == name }
  end

  # Resolve the plugin class named by +plugin+ (a CamelCase class-name
  # string) and instantiate it with the system objects hash and plugin name.
  # const_get instead of eval: same constant lookup, no string execution.
  def instantiate_plugin_script(plugin, system_objects, name)
    return Object.const_get(plugin).new(system_objects, name)
  end

end

View File

@@ -0,0 +1,76 @@
require 'ceedling/constants'
require 'ceedling/defaults'
class PluginReportinator

  constructor :plugin_reportinator_helper, :plugin_manager, :reportinator

  def setup
    @test_results_template = nil
  end

  # hand the full system-objects hash to our helper for template rendering
  def set_system_objects(system_objects)
    @plugin_reportinator_helper.ceedling = system_objects
  end

  def fetch_results(results_path, test, options={:boom => false})
    @plugin_reportinator_helper.fetch_results( File.join(results_path, test), options )
  end

  def generate_banner(message)
    @reportinator.generate_banner(message)
  end

  # merge a list of per-test result files into one aggregate structure
  def assemble_test_results(results_list, options={:boom => false})
    results_list.each_with_object( get_results_structure ) do |result_path, aggregate|
      fetched = @plugin_reportinator_helper.fetch_results( result_path, options )
      @plugin_reportinator_helper.process_results(aggregate, fetched)
    end
  end

  # first registration wins; later registrations are ignored
  def register_test_results_template(template)
    @test_results_template = template if @test_results_template.nil?
  end

  def run_test_results_report(hash, verbosity=Verbosity::NORMAL, &block)
    template = @test_results_template.nil? ? DEFAULT_TESTS_RESULTS_REPORT_TEMPLATE : @test_results_template
    run_report( $stdout, template, hash, verbosity, &block )
  end

  # optional block returns a failure message to register before rendering
  def run_report(stream, template, hash=nil, verbosity=Verbosity::NORMAL)
    failure = block_given? ? yield : nil
    @plugin_manager.register_build_failure( failure )
    @plugin_reportinator_helper.run_report( stream, template, hash, verbosity )
  end

  private ###############################

  # empty scaffold every results aggregation starts from
  def get_results_structure
    {
      :successes => [],
      :failures  => [],
      :ignores   => [],
      :stdout    => [],
      :counts    => {:total => 0, :passed => 0, :failed => 0, :ignored  => 0, :stdout => 0},
      :time      => 0.0
    }
  end

end

View File

@@ -0,0 +1,51 @@
require 'erb'
require 'rubygems'
require 'rake' # for ext()
require 'ceedling/constants'
class PluginReportinatorHelper

  # system objects hash, injected by PluginReportinator for template binding
  attr_writer :ceedling

  constructor :configurator, :streaminator, :yaml_wrapper, :file_wrapper

  # Load the results file for a test from +results_path+. A failure results
  # file wins over a pass file when both exist. With options[:boom] a missing
  # result raises; otherwise an empty hash is returned.
  def fetch_results(results_path, options)
    pass_path = File.join(results_path.ext( @configurator.extension_testpass ))
    fail_path = File.join(results_path.ext( @configurator.extension_testfail ))

    if (@file_wrapper.exist?(fail_path))
      return @yaml_wrapper.load(fail_path)
    elsif (@file_wrapper.exist?(pass_path))
      return @yaml_wrapper.load(pass_path)
    else
      if (options[:boom])
        @streaminator.stderr_puts("Could find no test results for '#{File.basename(results_path).ext(@configurator.extension_source)}'", Verbosity::ERRORS)
        raise
      end
    end

    return {}
  end

  # Fold one test's +results+ into +aggregate_results+ (structure produced by
  # PluginReportinator#get_results_structure): per-category collections plus
  # running counts and total time.
  def process_results(aggregate_results, results)
    return if (results.empty?)

    aggregate_results[:successes] << { :source => results[:source].clone, :collection => results[:successes].clone } if (results[:successes].size > 0)
    aggregate_results[:failures]  << { :source => results[:source].clone, :collection => results[:failures].clone  } if (results[:failures].size > 0)
    aggregate_results[:ignores]   << { :source => results[:source].clone, :collection => results[:ignores].clone   } if (results[:ignores].size > 0)
    aggregate_results[:stdout]    << { :source => results[:source].clone, :collection => results[:stdout].clone    } if (results[:stdout].size > 0)

    aggregate_results[:counts][:total]   += results[:counts][:total]
    aggregate_results[:counts][:passed]  += results[:counts][:passed]
    aggregate_results[:counts][:failed]  += results[:counts][:failed]
    aggregate_results[:counts][:ignored] += results[:counts][:ignored]
    aggregate_results[:counts][:stdout]  += results[:stdout].size
    aggregate_results[:time]             += results[:time]
  end

  # Render the ERB +template+ against this helper's binding (exposes @ceedling
  # and +hash+) and stream it at the given verbosity.
  def run_report(stream, template, hash, verbosity)
    # Ruby 3.1 removed ERB.new's positional safe_level/trim_mode arguments;
    # use the trim_mode keyword when available (Ruby >= 2.6), else fall back
    # to the legacy positional form for older rubies.
    output = if ERB.instance_method(:initialize).parameters.assoc(:key)
               ERB.new(template, trim_mode: "%<>")
             else
               ERB.new(template, 0, "%<>")
             end
    @streaminator.stream_puts(stream, output.result(binding()), verbosity)
  end

end

View File

@@ -0,0 +1,42 @@
class Preprocessinator

  attr_reader :preprocess_file_proc

  constructor :preprocessinator_helper, :preprocessinator_includes_handler, :preprocessinator_file_handler, :task_invoker, :file_path_utils, :yaml_wrapper

  # build callback procs @preprocessinator_helper can invoke on our behalf
  def setup
    @preprocess_includes_proc = proc { |filepath| preprocess_shallow_includes(filepath) }
    @preprocess_file_proc     = proc { |filepath| preprocess_file(filepath) }
  end

  # Drive the full preprocessing pipeline for one test file: scan includes,
  # determine and preprocess mockable headers, generate mocks, then
  # preprocess the test file itself. Returns the list of mock source files.
  def preprocess_test_and_invoke_test_mocks(test)
    @preprocessinator_helper.preprocess_includes(test, @preprocess_includes_proc)
    mocks = @preprocessinator_helper.assemble_mocks_list(test)
    @preprocessinator_helper.preprocess_mockable_headers(mocks, @preprocess_file_proc)
    @task_invoker.invoke_test_mocks(mocks)
    @preprocessinator_helper.preprocess_test_file(test, @preprocess_file_proc)
    mocks
  end

  # extract a file's direct #includes and persist them as a YAML list
  def preprocess_shallow_includes(filepath)
    list_path = @file_path_utils.form_preprocessed_includes_list_filepath(filepath)
    includes = @preprocessinator_includes_handler.extract_includes(filepath)
    @preprocessinator_includes_handler.write_shallow_includes_list(list_path, includes)
  end

  # preprocess a file, re-attaching its previously recorded shallow includes
  def preprocess_file(filepath)
    @preprocessinator_includes_handler.invoke_shallow_includes_list(filepath)
    list_path = @file_path_utils.form_preprocessed_includes_list_filepath(filepath)
    @preprocessinator_file_handler.preprocess_file( filepath, @yaml_wrapper.load(list_path) )
  end

end

View File

@@ -0,0 +1,30 @@
class PreprocessinatorExtractor

  # Preprocessing by way of the toolchain preprocessor expands macros,
  # eliminates comments, strips #ifdef'd-out code, etc. -- but it also
  # expands every #include'd file in place. This walks the expanded output
  # and keeps only the lines belonging to the originally preprocessed file:
  # lines between a '#' linemarker naming our file and the next linemarker.
  # #pragma lines are kept; other '#' directives end the current run.
  def extract_base_file_from_preprocessed_expansion(filepath)
    target = File.basename(filepath)
    directive = /^#(?!pragma\b)/ # preprocessor directive that's not a #pragma
    marker_for_target = /^#.*(\s|\/|\\|\")#{Regexp.escape(target)}/

    collecting = false # are we inside a run of lines from our file?
    collected = []

    File.readlines(filepath).each do |line|
      if collecting && line !~ directive
        collected << line
      else
        collecting = false
      end
      # a linemarker naming our file starts (or restarts) collection
      collecting = true if line =~ marker_for_target
    end

    collected
  end

end

View File

@@ -0,0 +1,21 @@
class PreprocessinatorFileHandler

  constructor :preprocessinator_extractor, :configurator, :tool_executor, :file_path_utils, :file_wrapper

  # Run the toolchain preprocessor over +filepath+, extract only the lines
  # belonging to the original file, re-attach its direct #include directives,
  # and write the result to the preprocessed-file location.
  def preprocess_file(filepath, includes)
    destination = @file_path_utils.form_preprocessed_file_filepath(filepath)

    command = @tool_executor.build_command_line(@configurator.tools_test_file_preprocessor, [], filepath, destination)
    @tool_executor.exec(command[:line], command[:options])

    body = @preprocessinator_extractor.extract_base_file_from_preprocessed_expansion(destination)

    # reversed to mirror the historical one-at-a-time unshift ordering
    directives = includes.map { |include| "#include \"#{include}\"" }
    @file_wrapper.write(destination, (directives.reverse + body).join("\n"))
  end

end

View File

@@ -0,0 +1,46 @@
class PreprocessinatorHelper

  constructor :configurator, :test_includes_extractor, :task_invoker, :file_finder, :file_path_utils

  # Discover a test file's includes: via the preprocessor's shallow-includes
  # list when test preprocessing is enabled, otherwise by parsing the test
  # file directly.
  def preprocess_includes(test, preprocess_includes_proc)
    unless @configurator.project_use_test_preprocessor
      @test_includes_extractor.parse_test_file(test)
      return
    end

    includes_list = @file_path_utils.form_preprocessed_includes_list_filepath(test)
    preprocess_includes_proc.call( @file_finder.find_test_from_file_path(includes_list) )
    @test_includes_extractor.parse_includes_list(includes_list)
  end

  # map a test's raw mock names to mock source file paths
  def assemble_mocks_list(test)
    raw_mocks = @test_includes_extractor.lookup_raw_mock_list(test)
    @file_path_utils.form_mocks_source_filelist( raw_mocks )
  end

  # preprocess every header that will be mocked (only when enabled)
  def preprocess_mockable_headers(mock_list, preprocess_file_proc)
    return unless @configurator.project_use_test_preprocessor
    headers = @file_path_utils.form_preprocessed_mockable_headers_filelist(mock_list)
    preprocess_files_smartly( headers, preprocess_file_proc ) { |file| @file_finder.find_header_file(file) }
  end

  # preprocess the test file itself (only when enabled)
  def preprocess_test_file(test, preprocess_file_proc)
    preprocess_file_proc.call(test) if @configurator.project_use_test_preprocessor
  end

  private ############################

  # with deep dependencies, let rake tasks preprocess (so dependency tracking
  # applies); otherwise preprocess each file directly, resolving each entry
  # through the supplied block first
  def preprocess_files_smartly(file_list, preprocess_file_proc)
    if @configurator.project_use_deep_dependencies
      @task_invoker.invoke_test_preprocessed_files(file_list)
    else
      file_list.each { |file| preprocess_file_proc.call( yield(file) ) }
    end
  end

end

View File

@@ -0,0 +1,181 @@
class PreprocessinatorIncludesHandler
  constructor :configurator, :tool_executor, :task_invoker, :file_path_utils, :yaml_wrapper, :file_wrapper
  # class-level cache of preprocessor make-rule output keyed by filepath, so
  # each file is scanned by the toolchain only once per build run
  @@makefile_cache = {}
  # shallow includes: only those headers a source file explicitly includes
  # Invoke the rake task that generates the shallow-includes list file for +filepath+.
  def invoke_shallow_includes_list(filepath)
    @task_invoker.invoke_test_shallow_include_lists( [@file_path_utils.form_preprocessed_includes_list_filepath(filepath)] )
  end
  ##
  # Ask the preprocessor for a make-style dependency rule of only the headers
  # the source file immediately includes.
  #
  # === Arguments
  # +filepath+ _String_:: Path to the test file to process.
  #
  # === Return
  # _String_:: The text of the dependency rule generated by the preprocessor.
  def form_shallow_dependencies_rule(filepath)
    # serve from the shared cache when this file was already scanned
    if @@makefile_cache.has_key?(filepath)
      return @@makefile_cache[filepath]
    end
    # change filename (prefix of '_') to prevent preprocessor from finding
    # include files in temp directory containing file it's scanning
    temp_filepath = @file_path_utils.form_temp_path(filepath, '_')
    # read the file and replace all include statements with a decorated version
    # (decorating the names creates file names that don't exist, thus preventing
    # the preprocessor from snaking out and discovering the entire include path
    # that winds through the code). The decorated filenames indicate files that
    # are included directly by the test file.
    contents = @file_wrapper.read(filepath)
    # re-encode if the file holds invalid byte sequences so the gsub! calls below cannot blow up
    if !contents.valid_encoding?
      contents = contents.encode("UTF-16be", :invalid=>:replace, :replace=>"?").encode('UTF-8')
    end
    # each include becomes two: the real one plus a '@@@@'-decorated marker
    contents.gsub!( /^\s*#include\s+[\"<]\s*(\S+)\s*[\">]/, "#include \"\\1\"\n#include \"@@@@\\1\"" )
    # TEST_FILE() directives are annotated the same way as includes
    contents.gsub!( /^\s*TEST_FILE\(\s*\"\s*(\S+)\s*\"\s*\)/, "#include \"\\1\"\n#include \"@@@@\\1\"")
    @file_wrapper.write( temp_filepath, contents )
    # extract the make-style dependency rule telling the preprocessor to
    # ignore the fact that it can't find the included files
    command = @tool_executor.build_command_line(@configurator.tools_test_includes_preprocessor, [], temp_filepath)
    shell_result = @tool_executor.exec(command[:line], command[:options])
    @@makefile_cache[filepath] = shell_result[:output]
    return shell_result[:output]
  end
  ##
  # Extract the headers that are directly included by a source file using the
  # provided, annotated Make dependency rule.
  #
  # === Arguments
  # +filepath+ _String_:: C source or header file to extract includes for.
  #
  # === Return
  # _Array_ of _String_:: Array of the direct dependencies for the source file.
  def extract_includes(filepath)
    to_process = [filepath]
    ignore_list = []
    list = []
    include_paths = @configurator.project_config_hash[:collection_paths_include]
    include_paths = [] if include_paths.nil?
    include_paths.map! {|path| File.expand_path(path)}
    # breadth-first walk of files to scan; with deep-dependency auto-linking
    # disabled the loop breaks after the first (shallow) pass
    while to_process.length > 0
      target = to_process.shift()
      ignore_list << target
      # puts "[HELL] Processing: \t\t#{target}"
      new_deps, new_to_process = extract_includes_helper(target, include_paths, ignore_list)
      list += new_deps
      to_process += new_to_process
      if (!@configurator.project_config_hash.has_key?(:project_auto_link_deep_dependencies) or
          !@configurator.project_config_hash[:project_auto_link_deep_dependencies])
        break
      else
        list = list.uniq()
        to_process = to_process.uniq()
      end
    end
    return list
  end
  # Scan one file: returns [direct dependencies, further files to process].
  # +ignore_list+ is mutated to record files that must not be revisited.
  def extract_includes_helper(filepath, include_paths, ignore_list)
    # Extract the dependencies from the make rule
    hdr_ext = @configurator.extension_header
    make_rule = self.form_shallow_dependencies_rule(filepath)
    dependencies = make_rule.split.find_all {|path| path.end_with?(hdr_ext) }.uniq
    dependencies.map! {|hdr| hdr.gsub('\\','/') }
    # Separate the real files form the annotated ones and remove the '@@@@'
    annotated_headers, real_headers = dependencies.partition {|hdr| hdr =~ /^@@@@/ }
    annotated_headers.map! {|hdr| hdr.gsub('@@@@','') }
    # Matching annotated_headers values against real_headers to ensure that
    # annotated_headers contain full path entries (as returned by make rule)
    annotated_headers.map! {|hdr| real_headers.find {|real_hdr| !real_hdr.match(/(.*\/)?#{Regexp.escape(hdr)}/).nil? } }
    annotated_headers = annotated_headers.compact
    # Find which of our annotated headers are "real" dependencies. This is
    # intended to weed out dependencies that have been removed due to build
    # options defined in the project yaml and/or in the headers themselves.
    list = annotated_headers.find_all do |annotated_header|
      # find the index of the "real" include that matches the annotated one.
      idx = real_headers.find_index do |real_header|
        real_header =~ /^(.*\/)?#{Regexp.escape(annotated_header)}$/
      end
      # If we found a real include, delete it from the array and return it,
      # otherwise return nil. Since nil is falsy this has the effect of making
      # find_all return only the annotated headers for which a real include was
      # found/deleted
      idx ? real_headers.delete_at(idx) : nil
    end.compact
    # Extract direct dependencies that were also added
    src_ext = @configurator.extension_source
    sdependencies = make_rule.split.find_all {|path| path.end_with?(src_ext) }.uniq
    sdependencies.map! {|hdr| hdr.gsub('\\','/') }
    list += sdependencies
    to_process = []
    if @configurator.project_config_hash.has_key?(:project_auto_link_deep_dependencies) && @configurator.project_config_hash[:project_auto_link_deep_dependencies]
      # Creating list of mocks
      mocks = annotated_headers.find_all do |annotated_header|
        File.basename(annotated_header) =~ /^#{@configurator.project_config_hash[:cmock_mock_prefix]}.*$/
      end.compact
      # Creating list of headers that should be recursively pre-processed
      # Skipping mocks and unity.h
      headers_to_deep_link = annotated_headers.select do |annotated_header|
        !(mocks.include? annotated_header) and (annotated_header.match(/^(.*\/)?unity\.h$/).nil?)
      end
      headers_to_deep_link.map! {|hdr| File.expand_path(hdr)}
      # the real header behind each mock must never be processed again
      mocks.each do |mock|
        dirname = File.dirname(mock)
        #basename = File.basename(mock).delete_prefix(@configurator.project_config_hash[:cmock_mock_prefix])
        basename = File.basename(mock).sub(@configurator.project_config_hash[:cmock_mock_prefix], '')
        if dirname != "."
          ignore_list << File.join(dirname, basename)
        else
          ignore_list << basename
        end
      end.compact
      # Filtering list of final includes to only include mocks and anything that is NOT in the ignore_list
      list = list.select do |item|
        mocks.include? item or !(ignore_list.any? { |ignore_item| !item.match(/^(.*\/)?#{Regexp.escape(ignore_item)}$/).nil? })
      end
      # queue surviving headers (and their sibling sources) for the next pass
      headers_to_deep_link.each do |hdr|
        if (ignore_list.none? {|ignore_header| hdr.match(/^(.*\/)?#{Regexp.escape(ignore_header)}$/)} and
            include_paths.none? {|include_path| hdr =~ /^#{include_path}\.*/})
          if File.exist?(hdr)
            to_process << hdr
            #source_file = hdr.delete_suffix(hdr_ext) + src_ext
            source_file = hdr.chomp(hdr_ext) + src_ext
            if source_file != hdr and File.exist?(source_file)
              to_process << source_file
            end
          end
        end
      end
    end
    return list, to_process
  end
  # Persist a shallow-includes list as YAML for later reload by preprocess_file.
  def write_shallow_includes_list(filepath, list)
    @yaml_wrapper.dump(filepath, list)
  end
end

View File

@@ -0,0 +1,46 @@
require 'ceedling/constants'
class ProjectConfigManager

  attr_reader :options_files, :release_config_changed, :test_config_changed, :test_defines_changed
  attr_accessor :config_hash

  constructor :cacheinator, :configurator, :yaml_wrapper, :file_wrapper

  # start with no options files and no detected configuration changes
  def setup
    @options_files = []
    @release_config_changed = false
    @test_config_changed = false
    @test_defines_changed = false
  end

  # deep-merge an options file into the given configuration hash, remembering
  # the file's basename (used for log/cache naming elsewhere)
  def merge_options(config_hash, option_filepath)
    @options_files << File.basename( option_filepath )
    config_hash.deep_merge!( @yaml_wrapper.load( option_filepath ) )
  end

  # has project configuration changed since last release build
  def process_release_config_change
    @release_config_changed = @cacheinator.diff_cached_release_config?( @config_hash )
  end

  # has project configuration changed since last test build
  def process_test_config_change
    @test_config_changed = @cacheinator.diff_cached_test_config?( @config_hash )
  end

  # has definitions changed since last test build
  def process_test_defines_change(files)
    @test_defines_changed = @cacheinator.diff_cached_test_defines?( files )
    # bump the force-rebuild file's timestamp so rake prerequisites re-trigger
    @file_wrapper.touch( @configurator.project_test_force_rebuild_filepath ) if @test_defines_changed
  end

end

View File

@@ -0,0 +1,99 @@
require 'ceedling/constants'
# Locates the main, user, and mixin project YAML files (via environment
# variables or default locations) and loads/merges them into one config hash.
class ProjectFileLoader
  attr_reader :main_file, :user_file
  constructor :yaml_wrapper, :stream_wrapper, :system_wrapper, :file_wrapper
  # reset all discovered-file state
  def setup
    @main_file = nil
    @mixin_files = []
    @user_file = nil
    @main_project_filepath = ''
    @mixin_project_filepaths = []
    @user_project_filepath = ''
  end
  # Discover project files on disk. Precedence: environment variable first,
  # then default on-disk location. Only a missing main project file is fatal.
  def find_project_files
    # first go hunting for optional user project file by looking for environment variable and then default location on disk
    user_filepath = @system_wrapper.env_get('CEEDLING_USER_PROJECT_FILE')
    if ( not user_filepath.nil? and @file_wrapper.exist?(user_filepath) )
      @user_project_filepath = user_filepath
    elsif (@file_wrapper.exist?(DEFAULT_CEEDLING_USER_PROJECT_FILE))
      @user_project_filepath = DEFAULT_CEEDLING_USER_PROJECT_FILE
    end
    # next check for mixin project files by looking for environment variable
    # (a PATH_SEPARATOR-delimited list; nonexistent entries are skipped silently)
    mixin_filepaths = @system_wrapper.env_get('CEEDLING_MIXIN_PROJECT_FILES')
    if ( not mixin_filepaths.nil? )
      mixin_filepaths.split(File::PATH_SEPARATOR).each do |filepath|
        if ( @file_wrapper.exist?(filepath) )
          @mixin_project_filepaths.push(filepath)
        end
      end
    end
    # next check for main project file by looking for environment variable and then default location on disk;
    # blow up if we don't find this guy -- like, he's so totally important
    main_filepath = @system_wrapper.env_get('CEEDLING_MAIN_PROJECT_FILE')
    if ( not main_filepath.nil? and @file_wrapper.exist?(main_filepath) )
      @main_project_filepath = main_filepath
    elsif (@file_wrapper.exist?(DEFAULT_CEEDLING_MAIN_PROJECT_FILE))
      @main_project_filepath = DEFAULT_CEEDLING_MAIN_PROJECT_FILE
    else
      # no verbosity checking since this is lowest level reporting anyhow &
      # verbosity checking depends on configurator which in turns needs this class (circular dependency)
      @stream_wrapper.stderr_puts('Found no Ceedling project file (*.yml)')
      raise
    end
    # expose basenames for downstream consumers (e.g. log file naming)
    @main_file = File.basename( @main_project_filepath )
    @mixin_project_filepaths.each do |filepath|
      @mixin_files.push(File.basename( filepath ))
    end
    @user_file = File.basename( @user_project_filepath ) if ( not @user_project_filepath.empty? )
  end
  # Recursively merge hash y2 into y1: hashes merge deep, arrays concatenate,
  # scalars from y2 overwrite y1. NOTE: y1 is mutated and returned.
  def yaml_merger(y1, y2)
    o1 = y1
    y2.each_pair do |k,v|
      if o1[k].nil?
        o1[k] = v
      else
        if (o1[k].instance_of? Hash)
          o1[k] = yaml_merger(o1[k], v)
        elsif (o1[k].instance_of? Array)
          o1[k] += v
        else
          o1[k] = v
        end
      end
    end
    return o1
  end
  # Load the main project file, then layer mixin files and finally the user
  # file over it (later merges win). Returns the merged config hash.
  def load_project_config
    config_hash = @yaml_wrapper.load(@main_project_filepath)
    # if there are mixin project files, then use them
    @mixin_project_filepaths.each do |filepath|
      mixin = @yaml_wrapper.load(filepath)
      config_hash = yaml_merger( config_hash, mixin )
    end
    # if there's a user project file, then use it
    if ( not @user_project_filepath.empty? )
      user_hash = @yaml_wrapper.load(@user_project_filepath)
      config_hash = yaml_merger( config_hash, user_hash )
    end
    return config_hash
  end
end

View File

@@ -0,0 +1,17 @@
class RakeUtils

  constructor :rake_wrapper

  # True if any rake task whose name matches +task_regex+ has already run.
  def task_invoked?(task_regex)
    @rake_wrapper.task_list.any? do |task|
      task.already_invoked and task.to_s =~ task_regex
    end
  end

end

View File

@@ -0,0 +1,33 @@
require 'rubygems'
require 'rake'
require 'ceedling/makefile' # our replacement for rake's make-style dependency loader
include Rake::DSL if defined?(Rake::DSL)
# Reopen Rake::Task to expose its internal @already_invoked flag, so the
# build can ask whether a given task has run during this invocation.
class Rake::Task
  attr_reader :already_invoked
end
# Thin injectable facade over Rake's global task registry and our custom
# make-style dependency loader, so collaborators can be tested/stubbed.
class RakeWrapper

  def initialize
    # our custom replacement for Rake's make-style loader (see ceedling/makefile)
    @makefile_loader = Rake::MakefileLoader.new
  end

  # look up a task by name/symbol
  def [](task)
    Rake::Task[task]
  end

  # all tasks currently defined in the rake application
  def task_list
    Rake::Task.tasks
  end

  # define a file task with the given dependencies
  def create_file_task(file_task, dependencies)
    file(file_task => dependencies)
  end

  # parse a make-style dependency file and register its rules as file tasks
  def load_dependencies(dependencies_path)
    @makefile_loader.load(dependencies_path)
  end

end

View File

@@ -0,0 +1,86 @@
require 'fileutils'

# get directory containing this here file, back up one directory, and expand to full path
CEEDLING_ROOT = File.expand_path(File.dirname(__FILE__) + '/../..')
CEEDLING_LIB = File.join(CEEDLING_ROOT, 'lib')
CEEDLING_VENDOR = File.join(CEEDLING_ROOT, 'vendor')
CEEDLING_RELEASE = File.join(CEEDLING_ROOT, 'release')

# make ceedling itself and all vendored tool libraries requirable
$LOAD_PATH.unshift( CEEDLING_LIB )
$LOAD_PATH.unshift( File.join(CEEDLING_VENDOR, 'unity/auto') )
$LOAD_PATH.unshift( File.join(CEEDLING_VENDOR, 'diy/lib') )
$LOAD_PATH.unshift( File.join(CEEDLING_VENDOR, 'cmock/lib') )
$LOAD_PATH.unshift( File.join(CEEDLING_VENDOR, 'deep_merge/lib') )

require 'rake'

#Let's make sure we remember the task descriptions in case we need them
Rake::TaskManager.record_task_metadata = true

require 'diy'
require 'constructor'

require 'ceedling/constants'
require 'ceedling/target_loader'

# construct all our objects
# ensure load path contains all libraries needed first
lib_ceedling_load_path_temp = File.join(CEEDLING_LIB, 'ceedling')
$LOAD_PATH.unshift( lib_ceedling_load_path_temp )
# DIY builds the dependency-injected object graph declared in objects.yml
@ceedling = DIY::Context.from_yaml( File.read( File.join(lib_ceedling_load_path_temp, 'objects.yml') ) )
@ceedling.build_everything
# now that all objects are built, delete 'lib/ceedling' from load path
$LOAD_PATH.delete(lib_ceedling_load_path_temp)

# one-stop shopping for all our setup and such after construction
@ceedling[:setupinator].ceedling = @ceedling

# load the project config; TargetLoader may swap in a target-specific project
# file (setting ENV) and request a reload of project files
project_config =
  begin
    cfg = @ceedling[:setupinator].load_project_files
    TargetLoader.inspect(cfg, ENV['TARGET'])
  rescue TargetLoader::NoTargets
    cfg
  rescue TargetLoader::RequestReload
    @ceedling[:setupinator].load_project_files
  end

@ceedling[:setupinator].do_setup( project_config )

# tell all our plugins we're about to do something
@ceedling[:plugin_manager].pre_build

# load rakefile component files (*.rake)
PROJECT_RAKEFILE_COMPONENT_FILES.each { |component| load(component) }

# tell rake to shut up by default (overridden in verbosity / debug tasks as appropriate)
verbose(false)

# end block always executed following rake run
END {
  $stdout.flush unless $stdout.nil?
  $stderr.flush unless $stderr.nil?

  # cache our input configurations to use in comparison upon next execution
  @ceedling[:cacheinator].cache_test_config( @ceedling[:setupinator].config_hash ) if (@ceedling[:task_invoker].test_invoked?)
  @ceedling[:cacheinator].cache_release_config( @ceedling[:setupinator].config_hash ) if (@ceedling[:task_invoker].release_invoked?)

  # delete all temp files unless we're in debug mode
  if (not @ceedling[:configurator].project_debug)
    @ceedling[:file_wrapper].rm_f( @ceedling[:file_wrapper].directory_listing( File.join(@ceedling[:configurator].project_temp_path, '*') ))
  end

  # only perform these final steps if we got here without runtime exceptions or errors
  if (@ceedling[:system_wrapper].ruby_success)
    # tell all our plugins the build is done and process results
    @ceedling[:plugin_manager].post_build
    @ceedling[:plugin_manager].print_plugin_failures
    exit(1) if (@ceedling[:plugin_manager].plugins_failed? && !@ceedling[:setupinator].config_hash[:graceful_fail])
  else
    puts "ERROR: Ceedling Failed"
    @ceedling[:plugin_manager].post_error
  end
}

View File

@@ -0,0 +1,73 @@
require 'ceedling/constants'

# Coordinates release-build object generation: forms object file lists,
# wires up dependency enhancement, and invokes the corresponding rake tasks.
class ReleaseInvoker

  constructor :configurator, :release_invoker_helper, :build_invoker_utils, :dependinator, :task_invoker, :file_path_utils, :file_wrapper

  # Build release objects for the given C sources; returns the object filelist.
  # Build exceptions are routed through build_invoker_utils.
  def setup_and_invoke_c_objects( c_files )
    object_list = @file_path_utils.form_release_build_c_objects_filelist( c_files )

    begin
      dependency_files = @file_path_utils.form_release_dependencies_filelist( c_files )
      @release_invoker_helper.process_deep_dependencies( dependency_files )

      @dependinator.enhance_release_file_dependencies( object_list )
      @task_invoker.invoke_release_objects( object_list )
    rescue => e
      @build_invoker_utils.process_exception( e, RELEASE_SYM, false )
    end

    return object_list
  end

  # Build release objects for the given assembly sources; returns the object filelist.
  def setup_and_invoke_asm_objects( asm_files )
    object_list = @file_path_utils.form_release_build_asm_objects_filelist( asm_files )

    begin
      @dependinator.enhance_release_file_dependencies( object_list )
      @task_invoker.invoke_release_objects( object_list )
    rescue => e
      @build_invoker_utils.process_exception( e, RELEASE_SYM, false )
    end

    return object_list
  end

  # Purge existing release dependency files and regenerate them for all C sources.
  # No-op unless deep dependencies are enabled in the project config.
  def refresh_c_deep_dependencies
    return unless @configurator.project_use_deep_dependencies

    stale_files = @file_wrapper.directory_listing(
      File.join( @configurator.project_release_dependencies_path,
                 '*' + @configurator.extension_dependencies ) )
    @file_wrapper.rm_f( stale_files )

    @release_invoker_helper.process_deep_dependencies(
      @file_path_utils.form_release_dependencies_filelist( @configurator.collection_all_source ) )
  end

  # Copy each file that exists into the release artifacts directory.
  def artifactinate( *files )
    files.flatten.each do |filepath|
      next unless @file_wrapper.exist?( filepath )
      @file_wrapper.cp( filepath, @configurator.project_release_artifacts_path )
    end
  end

  # Render library names (plus any LIBRARIES_SYSTEM) into linker arguments,
  # substituting each into the LIBRARIES_FLAG template when one is defined.
  def convert_libraries_to_arguments(libraries)
    args = (libraries || []) + ((defined? LIBRARIES_SYSTEM) ? LIBRARIES_SYSTEM : [])
    args = args.map { |lib| LIBRARIES_FLAG.gsub(/\$\{1\}/, lib) } if (defined? LIBRARIES_FLAG)
    return args
  end

  # Partition a mixed prerequisite list into [objects, libraries] by file
  # extension (EXTENSION_SUBPROJECTS when defined, '.LIBRARY' otherwise).
  def sort_objects_and_libraries(both)
    extension = "\\" + ((defined? EXTENSION_SUBPROJECTS) ? EXTENSION_SUBPROJECTS : ".LIBRARY")
    grouped = both.group_by { |path| path.match(/.+#{extension}$/) ? :libraries : :objects }
    return (grouped[:objects] || []), (grouped[:libraries] || [])
  end

end

View File

@@ -0,0 +1,19 @@
# Regenerates and loads deep dependency information for release builds.
class ReleaseInvokerHelper

  constructor :configurator, :dependinator, :task_invoker

  # No-op unless deep dependencies are enabled; optionally regenerates the
  # dependency files, then loads them into the dependency graph.
  def process_deep_dependencies(dependencies_list)
    return unless @configurator.project_use_deep_dependencies

    if @configurator.project_generate_deep_dependencies
      @dependinator.enhance_release_file_dependencies( dependencies_list )
      @task_invoker.invoke_release_dependencies_files( dependencies_list )
    end

    @dependinator.load_release_object_deep_dependencies( dependencies_list )
  end

end

View File

@@ -0,0 +1,26 @@
##
# Pretifies reports
class Reportinator

  ##
  # Generates a banner for a message based on the length of the message or a
  # given width.
  # ==== Attributes
  #
  # * _message_: The message to put.
  # * _width_: The width of the message. If nil the size of the banner is
  # determined by the length of the message.
  #
  # ==== Examples
  #
  #   rp = Reportinator.new
  #   rp.generate_banner("Hello world!") => "------------\nHello world!\n------------\n"
  #   rp.generate_banner("Hello world!", 3) => "---\nHello world!\n---\n"
  #
  def generate_banner(message, width = nil)
    # banner width defaults to the stripped message length
    rule_length = width.nil? ? message.strip.length : width
    horizontal_rule = '-' * rule_length
    "#{horizontal_rule}\n#{message}\n#{horizontal_rule}\n"
  end
end

View File

@@ -0,0 +1,9 @@
# rule: generate a mock source file (named with the CMock mock prefix);
# the proc maps the requested mock file path back to the header it mocks
rule(/#{CMOCK_MOCK_PREFIX}[^\/\\]+#{'\\'+EXTENSION_SOURCE}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_header_input_for_mock_file(task_name)
    end
  ]) do |mock|
  @ceedling[:generator].generate_mock(TEST_SYM, mock.source)
end

View File

@@ -0,0 +1,26 @@
# invocations against this rule should only happen when enhanced dependencies are enabled;
# otherwise, dependency tracking will be too shallow and preprocessed files could intermittently
# fail to be updated when they actually need to be.
rule(/#{PROJECT_TEST_PREPROCESS_FILES_PATH}\/.+/ => [
    proc do |task_name|
      # map a preprocessed-file task back to the originating test/source/header file
      @ceedling[:file_finder].find_test_or_source_or_header_file(task_name)
    end
  ]) do |file|
  # guard: this rule is meaningless (and harmful) without deep dependency support
  if (not @ceedling[:configurator].project_use_deep_dependencies)
    raise 'ERROR: Ceedling preprocessing rule invoked though neccessary auxiliary dependency support not enabled.'
  end

  @ceedling[:generator].generate_preprocessed_file(TEST_SYM, file.source)
end

# invocations against this rule can always happen as there are no deeper dependencies to consider
rule(/#{PROJECT_TEST_PREPROCESS_INCLUDES_PATH}\/.+/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_test_or_source_or_header_file(task_name)
    end
  ]) do |file|
  @ceedling[:generator].generate_shallow_includes_list(TEST_SYM, file.source)
end

View File

@@ -0,0 +1,86 @@
RELEASE_COMPILE_TASK_ROOT = RELEASE_TASK_ROOT + 'compile:' unless defined?(RELEASE_COMPILE_TASK_ROOT)
RELEASE_ASSEMBLE_TASK_ROOT = RELEASE_TASK_ROOT + 'assemble:' unless defined?(RELEASE_ASSEMBLE_TASK_ROOT)

# rule: build a release object from an assembly source (only when assembly is enabled)
if (RELEASE_BUILD_USE_ASSEMBLY)
  rule(/#{PROJECT_RELEASE_BUILD_OUTPUT_ASM_PATH}\/#{'.+\\'+EXTENSION_OBJECT}$/ => [
      proc do |task_name|
        @ceedling[:file_finder].find_assembly_file(task_name)
      end
    ]) do |object|
    @ceedling[:generator].generate_object_file(
      TOOLS_RELEASE_ASSEMBLER,
      OPERATION_ASSEMBLE_SYM,
      RELEASE_SYM,
      object.source,
      object.name )
  end
end

# rule: compile a release object from a C source
rule(/#{PROJECT_RELEASE_BUILD_OUTPUT_C_PATH}\/#{'.+\\'+EXTENSION_OBJECT}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name, :error, true)
    end
  ]) do |object|
  @ceedling[:generator].generate_object_file(
    TOOLS_RELEASE_COMPILER,
    OPERATION_COMPILE_SYM,
    RELEASE_SYM,
    object.source,
    object.name,
    @ceedling[:file_path_utils].form_release_build_c_list_filepath( object.name ),
    @ceedling[:file_path_utils].form_release_dependencies_filepath( object.name ) )
end

# rule: link the release binary from its object & library prerequisites,
# then copy the binary and map file into the artifacts directory
rule(/#{PROJECT_RELEASE_BUILD_TARGET}/) do |bin_file|
  objects, libraries = @ceedling[:release_invoker].sort_objects_and_libraries(bin_file.prerequisites)
  tool = TOOLS_RELEASE_LINKER.clone
  lib_args = @ceedling[:release_invoker].convert_libraries_to_arguments(libraries)
  map_file = @ceedling[:configurator].project_release_build_map
  @ceedling[:generator].generate_executable_file(
    tool,
    RELEASE_SYM,
    objects,
    bin_file.name,
    map_file,
    lib_args )
  @ceedling[:release_invoker].artifactinate( bin_file.name, map_file, @ceedling[:configurator].release_build_artifacts )
end

namespace RELEASE_SYM do
  # use rules to increase efficiency for large projects (instead of iterating through all sources and creating defined tasks)

  namespace :compile do
    rule(/^#{RELEASE_COMPILE_TASK_ROOT}\S+#{'\\'+EXTENSION_SOURCE}$/ => [ # compile task names by regex
        proc do |task_name|
          # strip the task-name prefix to recover the bare source file name
          source = task_name.sub(/#{RELEASE_COMPILE_TASK_ROOT}/, '')
          @ceedling[:file_finder].find_source_file(source, :error)
        end
      ]) do |compile|
      @ceedling[:rake_wrapper][:directories].invoke
      @ceedling[:project_config_manager].process_release_config_change
      @ceedling[:release_invoker].setup_and_invoke_c_objects( [compile.source] )
    end
  end

  if (RELEASE_BUILD_USE_ASSEMBLY)
    namespace :assemble do
      rule(/^#{RELEASE_ASSEMBLE_TASK_ROOT}\S+#{'\\'+EXTENSION_ASSEMBLY}$/ => [ # assemble task names by regex
          proc do |task_name|
            source = task_name.sub(/#{RELEASE_ASSEMBLE_TASK_ROOT}/, '')
            @ceedling[:file_finder].find_assembly_file(source)
          end
        ]) do |assemble|
        @ceedling[:rake_wrapper][:directories].invoke
        @ceedling[:project_config_manager].process_release_config_change
        @ceedling[:release_invoker].setup_and_invoke_asm_objects( [assemble.source] )
      end
    end
  end
end

View File

@@ -0,0 +1,15 @@
# rule: generate a deep-dependencies file for a release build source file
rule(/#{PROJECT_RELEASE_DEPENDENCIES_PATH}\/#{'.+\\'+EXTENSION_DEPENDENCIES}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name, :error, true)
    end
  ]) do |dep|
  @ceedling[:generator].generate_dependencies_file(
    TOOLS_RELEASE_DEPENDENCIES_GENERATOR,
    RELEASE_SYM,
    dep.source,
    @ceedling[:file_path_utils].form_release_build_c_object_filepath(dep.source),
    dep.name)
end

View File

@@ -0,0 +1,74 @@
# rule: generate a test runner source file from its corresponding test file
rule(/#{PROJECT_TEST_FILE_PREFIX}#{'.+'+TEST_RUNNER_FILE_SUFFIX}#{'\\'+EXTENSION_SOURCE}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_test_input_for_runner_file(task_name)
    end
  ]) do |runner|
  @ceedling[:generator].generate_test_runner(TEST_SYM, runner.source, runner.name)
end

# rule: build a test object file — compile when the input is a C source,
# assemble when assembly support is enabled
rule(/#{PROJECT_TEST_BUILD_OUTPUT_C_PATH}\/#{'.+\\'+EXTENSION_OBJECT}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name)
    end
  ]) do |object|
  if (File.basename(object.source) =~ /#{EXTENSION_SOURCE}$/)
    @ceedling[:generator].generate_object_file(
      TOOLS_TEST_COMPILER,
      OPERATION_COMPILE_SYM,
      TEST_SYM,
      object.source,
      object.name,
      @ceedling[:file_path_utils].form_test_build_list_filepath( object.name ),
      @ceedling[:file_path_utils].form_test_dependencies_filepath( object.name ))
  elsif (defined?(TEST_BUILD_USE_ASSEMBLY) && TEST_BUILD_USE_ASSEMBLY)
    @ceedling[:generator].generate_object_file(
      TOOLS_TEST_ASSEMBLER,
      OPERATION_ASSEMBLE_SYM,
      TEST_SYM,
      object.source,
      object.name )
  end
end

# rule: link a test executable from its prerequisite objects
rule(/#{PROJECT_TEST_BUILD_OUTPUT_PATH}\/#{'.+\\'+EXTENSION_EXECUTABLE}$/) do |bin_file|
  lib_args = @ceedling[:test_invoker].convert_libraries_to_arguments()
  @ceedling[:generator].generate_executable_file(
    TOOLS_TEST_LINKER,
    TEST_SYM,
    bin_file.prerequisites,
    bin_file.name,
    @ceedling[:file_path_utils].form_test_build_map_filepath( bin_file.name ),
    lib_args )
end

# rule: produce a test results file by running the test executable via the fixture tool
rule(/#{PROJECT_TEST_RESULTS_PATH}\/#{'.+\\'+EXTENSION_TESTPASS}$/ => [
    proc do |task_name|
      @ceedling[:file_path_utils].form_test_executable_filepath(task_name)
    end
  ]) do |test_result|
  @ceedling[:generator].generate_test_results(TOOLS_TEST_FIXTURE, TEST_SYM, test_result.source, test_result.name)
end

namespace TEST_SYM do
  # use rules to increase efficiency for large projects (instead of iterating through all sources and creating defined tasks)
  rule(/^#{TEST_TASK_ROOT}\S+$/ => [ # test task names by regex
      proc do |task_name|
        test = task_name.sub(/#{TEST_TASK_ROOT}/, '')
        # prepend the test file prefix when the user gave a bare source name
        test = "#{PROJECT_TEST_FILE_PREFIX}#{test}" if not (test.start_with?(PROJECT_TEST_FILE_PREFIX))
        @ceedling[:file_finder].find_test_from_file_path(test)
      end
    ]) do |test|
    # re-enable the directories task when test defines changed so it runs again
    @ceedling[:rake_wrapper][:directories].reenable if @ceedling[:task_invoker].first_run == false && @ceedling[:project_config_manager].test_defines_changed
    @ceedling[:rake_wrapper][:directories].invoke
    @ceedling[:test_invoker].setup_and_invoke([test.source])
  end
end

View File

@@ -0,0 +1,15 @@
# rule: generate a deep-dependencies file for a test build source file
rule(/#{PROJECT_TEST_DEPENDENCIES_PATH}\/#{'.+\\'+EXTENSION_DEPENDENCIES}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name)
    end
  ]) do |dep|
  @ceedling[:generator].generate_dependencies_file(
    TOOLS_TEST_DEPENDENCIES_GENERATOR,
    TEST_SYM,
    dep.source,
    @ceedling[:file_path_utils].form_test_build_c_object_filepath(dep.source),
    dep.name)
end

View File

@@ -0,0 +1,53 @@
# Drives one-time project setup after object construction: loads project
# configuration files and walks the configurator through its setup sequence.
class Setupinator

  # the merged project configuration hash (populated by do_setup)
  attr_reader :config_hash
  # the DIY object container, injected after construction
  attr_writer :ceedling

  def setup
    @ceedling = {}
    @config_hash = {}
  end

  # Find and load the project YAML file(s); returns the merged config hash.
  def load_project_files
    @ceedling[:project_file_loader].find_project_files
    return @ceedling[:project_file_loader].load_project_config
  end

  # Run the full configuration pipeline on config_hash.
  # NOTE(review): the call order below appears significant — each configurator
  # step operates on state built by the previous one; do not reorder.
  def do_setup(config_hash)
    @config_hash = config_hash

    # load up all the constants and accessors our rake files, objects, & external scripts will need;
    # note: configurator modifies the cmock section of the hash with a couple defaults to tie
    # project together - the modified hash is used to build cmock object
    @ceedling[:configurator].populate_defaults( config_hash )
    @ceedling[:configurator].populate_unity_defaults( config_hash )
    @ceedling[:configurator].populate_cmock_defaults( config_hash )
    @ceedling[:configurator].find_and_merge_plugins( config_hash )
    @ceedling[:configurator].merge_imports( config_hash )
    @ceedling[:configurator].tools_setup( config_hash )
    @ceedling[:configurator].eval_environment_variables( config_hash )
    @ceedling[:configurator].eval_paths( config_hash )
    @ceedling[:configurator].standardize_paths( config_hash )
    @ceedling[:configurator].validate( config_hash )
    @ceedling[:configurator].build( config_hash, :environment )

    @ceedling[:configurator].insert_rake_plugins( @ceedling[:configurator].rake_plugins )
    @ceedling[:configurator].tools_supplement_arguments( config_hash )

    # merge in any environment variables plugins specify, after the main build
    @ceedling[:plugin_manager].load_plugin_scripts( @ceedling[:configurator].script_plugins, @ceedling ) do |env|
      @ceedling[:configurator].eval_environment_variables( env )
      @ceedling[:configurator].build_supplement( config_hash, env )
    end

    @ceedling[:plugin_reportinator].set_system_objects( @ceedling )
    @ceedling[:file_finder].prepare_search_sources
    @ceedling[:loginator].setup_log_filepath
    @ceedling[:project_config_manager].config_hash = config_hash
  end

  # Delegate to the configurator to re-apply default values to a config hash.
  def reset_defaults(config_hash)
    @ceedling[:configurator].reset_defaults( config_hash )
  end
end

View File

@@ -0,0 +1,28 @@
# Mockable wrapper around the standard output streams; stdout writes can be
# redirected to an arbitrary block (used to capture output).
class StreamWrapper

  # Install a block that receives stdout strings in place of $stdout.
  def stdout_override(&fnc)
    @stdout_override_fnc = fnc
  end

  # Write to the override block when one is installed, otherwise to $stdout.
  def stdout_puts(string)
    @stdout_override_fnc.nil? ? $stdout.puts(string) : @stdout_override_fnc.call(string)
  end

  def stdout_flush
    $stdout.flush
  end

  def stderr_puts(string)
    $stderr.puts(string)
  end

  def stderr_flush
    $stderr.flush
  end

end

View File

@@ -0,0 +1,40 @@
require 'ceedling/constants'

# for those objects for whom the configurator has already been instantiated,
# Streaminator is a convenience object for handling verbosity and writing to the std streams
class Streaminator

  constructor :streaminator_helper, :verbosinator, :loginator, :stream_wrapper

  # Write string to $stdout (with flush) when verbosity permits; always logged.
  def stdout_puts(string, verbosity=Verbosity::NORMAL)
    if @verbosinator.should_output?(verbosity)
      @stream_wrapper.stdout_puts(string)
      @stream_wrapper.stdout_flush
    end

    # write to log as though Verbosity::OBNOXIOUS
    @loginator.log( string, @streaminator_helper.extract_name($stdout) )
  end

  # Write string to $stderr (with flush) when verbosity permits; always logged.
  def stderr_puts(string, verbosity=Verbosity::NORMAL)
    if @verbosinator.should_output?(verbosity)
      @stream_wrapper.stderr_puts(string)
      @stream_wrapper.stderr_flush
    end

    # write to log as though Verbosity::OBNOXIOUS
    @loginator.log( string, @streaminator_helper.extract_name($stderr) )
  end

  # Write string to any given stream (with flush) when verbosity permits; always logged.
  def stream_puts(stream, string, verbosity=Verbosity::NORMAL)
    if @verbosinator.should_output?(verbosity)
      stream.puts(string)
      stream.flush
    end

    # write to log as though Verbosity::OBNOXIOUS
    @loginator.log( string, @streaminator_helper.extract_name(stream) )
  end

end

View File

@@ -0,0 +1,15 @@
# Maps an IO stream to a human-readable name for log entries.
class StreaminatorHelper

  # Identify the standard streams by file descriptor number; anything else
  # falls back to the stream's #inspect representation.
  def extract_name(stream)
    case stream.fileno
    when 0 then '#<IO:$stdin>'
    when 1 then '#<IO:$stdout>'
    when 2 then '#<IO:$stderr>'
    else stream.inspect
    end
  end

end

View File

@@ -0,0 +1,37 @@
class Object
  # Produce a fully independent copy of this object by round-tripping it
  # through Marshal — unlike #dup/#clone, nested structures are copied too.
  def deep_clone
    Marshal.load(Marshal.dump(self))
  end
end
##
# Class containing system utility funcions.
class SystemUtils

  constructor :system_wrapper

  ##
  # Sets up the class.
  def setup
    @tcsh_shell = nil
  end

  ##
  # Checks the system shell to see if it a tcsh shell.
  def tcsh_shell?
    # once run a single time, return state determined at that execution
    if @tcsh_shell.nil?
      probe = @system_wrapper.shell_backticks('echo $version')
      @tcsh_shell = ((probe[:exit_code] == 0) and (probe[:output].strip =~ /^tcsh/)) ? true : false
    end
    return @tcsh_shell
  end
end

View File

@@ -0,0 +1,80 @@
require 'rbconfig'

# Thin, mockable wrapper around OS / Ruby runtime facilities: platform
# detection, environment variables, command-line args, and shell execution.
class SystemWrapper

  # static method for use in defaults
  def self.windows?
    return ((RbConfig::CONFIG['host_os'] =~ /mswin|mingw/) ? true : false) if defined?(RbConfig)
    return ((Config::CONFIG['host_os'] =~ /mswin|mingw/) ? true : false)
  end

  # class method so as to be mockable for tests
  def windows?
    return SystemWrapper.windows?
  end

  # Evaluate string as the contents of a double-quoted Ruby string literal,
  # expanding any #{} interpolation; returns the resulting String.
  def module_eval(string)
    return Object.module_eval("\"" + string + "\"")
  end

  # Evaluate string as Ruby code and return the result.
  def eval(string)
    # BUGFIX: must delegate to Kernel explicitly — a bare eval() call here
    # resolves back to this very method and recurses until stack overflow.
    return Kernel.eval(string)
  end

  # Directories listed in the PATH environment variable.
  def search_paths
    return ENV['PATH'].split(File::PATH_SEPARATOR)
  end

  def cmdline_args
    return ARGV
  end

  def env_set(name, value)
    ENV[name] = value
  end

  def env_get(name)
    return ENV[name]
  end

  # Human-readable current wall-clock time.
  def time_now
    return Time.now.asctime
  end

  # Run command via backticks, capturing its output.
  # When boom is true, records the exit status in global $exit_code
  # (consulted later by ruby_success).
  def shell_backticks(command, boom = true)
    retval = `#{command}`.freeze
    $exit_code = ($?.exitstatus).freeze if boom
    return {
      :output => retval.freeze,
      :exit_code => ($?.exitstatus).freeze
    }
  end

  # Run command via Kernel#system (output goes to our own stdout/stderr).
  # When boom is true, records the exit status in global $exit_code.
  def shell_system(command, boom = true)
    system( command )
    $exit_code = ($?.exitstatus).freeze if boom
    return {
      :output => "".freeze,
      :exit_code => ($?.exitstatus).freeze
    }
  end

  def add_load_path(path)
    $LOAD_PATH.unshift(path)
  end

  def require_file(path)
    require(path)
  end

  def ruby_success
    # We are successful if we've never had an exit code that went boom (either because it's empty or it was 0)
    return ($exit_code.nil? || ($exit_code == 0)) && ($!.nil? || $!.is_a?(SystemExit) && $!.success?)
  end

  def constants_include?(item)
    # forcing to strings provides consistency across Ruby versions
    return Object.constants.map{|constant| constant.to_s}.include?(item.to_s)
  end

end

View File

@@ -0,0 +1,38 @@
# Resolves a target-specific project file from the :targets section of the
# project config, points CEEDLING_MAIN_PROJECT_FILE at it, and raises
# RequestReload so the caller re-loads project files against the new target.
module TargetLoader

  class NoTargets < Exception; end
  class NoDirectory < Exception; end
  class NoDefault < Exception; end
  class NoSuchTarget < Exception; end

  # raised deliberately on success to signal "reload project files now"
  class RequestReload < Exception; end

  # Validate the :targets config and select target_name (or the default).
  # Raises one of the exceptions above on any missing piece.
  def self.inspect(config, target_name=nil)
    unless config[:targets]
      raise NoTargets
    end

    targets = config[:targets]

    unless targets[:targets_directory]
      raise NoDirectory.new("No targets directory specified.")
    end

    unless targets[:default_target]
      raise NoDefault.new("No default target specified.")
    end

    target_path = lambda { |name| File.join(targets[:targets_directory], name + ".yml") }

    target = if target_name
               target_path.call(target_name)
             else
               target_path.call(targets[:default_target])
             end

    # BUGFIX: File.exists? was deprecated and removed in Ruby 3.2; use File.exist?
    unless File.exist? target
      raise NoSuchTarget.new("No such target: #{target}")
    end

    ENV['CEEDLING_MAIN_PROJECT_FILE'] = target

    raise RequestReload
  end
end

View File

@@ -0,0 +1,117 @@
require 'ceedling/par_map'

# Central point for invoking rake tasks (mocks, runners, objects, results),
# parallelizing file-level work and re-enabling tasks when test defines change.
class TaskInvoker

  attr_accessor :first_run

  constructor :dependinator, :rake_utils, :rake_wrapper, :project_config_manager

  def setup
    @test_regexs = [/^#{TEST_ROOT_NAME}:/]
    @release_regexs = [/^#{RELEASE_ROOT_NAME}(:|$)/]
    @first_run = true
  end

  # Register an additional regex identifying test-related task names.
  def add_test_task_regex(regex)
    @test_regexs << regex
  end

  # Register an additional regex identifying release-related task names.
  def add_release_task_regex(regex)
    @release_regexs << regex
  end

  # True if any task matching a registered test regex was invoked this run.
  def test_invoked?
    @test_regexs.any? { |regex| @rake_utils.task_invoked?(regex) }
  end

  # True if any task matching a registered release regex was invoked this run.
  def release_invoked?
    @release_regexs.any? { |regex| @rake_utils.task_invoked?(regex) }
  end

  def invoked?(regex)
    @rake_utils.task_invoked?(regex)
  end

  def invoke_test_mocks(mocks)
    @dependinator.enhance_mock_dependencies( mocks )
    mocks.each { |mock| reinvoke_if_stale(mock) }
  end

  def invoke_test_runner(runner)
    @dependinator.enhance_runner_dependencies( runner )
    reinvoke_if_stale(runner)
  end

  def invoke_test_shallow_include_lists(files)
    @dependinator.enhance_shallow_include_lists_dependencies( files )
    par_map(PROJECT_COMPILE_THREADS, files) { |file| reinvoke_if_stale(file) }
  end

  def invoke_test_preprocessed_files(files)
    @dependinator.enhance_preprocesed_file_dependencies( files )
    par_map(PROJECT_COMPILE_THREADS, files) { |file| reinvoke_if_stale(file) }
  end

  def invoke_test_dependencies_files(files)
    @dependinator.enhance_dependencies_dependencies( files )
    par_map(PROJECT_COMPILE_THREADS, files) { |file| reinvoke_if_stale(file) }
  end

  def invoke_test_objects(objects)
    par_map(PROJECT_COMPILE_THREADS, objects) { |object| reinvoke_if_stale(object) }
  end

  def invoke_test_executable(file)
    @rake_wrapper[file].invoke
  end

  def invoke_test_results(result)
    @dependinator.enhance_results_dependencies( result )
    reinvoke_if_stale(result)
  end

  def invoke_release_dependencies_files(files)
    par_map(PROJECT_COMPILE_THREADS, files) { |file| @rake_wrapper[file].invoke }
  end

  def invoke_release_objects(objects)
    par_map(PROJECT_COMPILE_THREADS, objects) { |object| @rake_wrapper[object].invoke }
  end

  private

  # Invoke the named task; re-enable it first so rake runs it again when this
  # is not the first run and test defines changed since the last execution.
  def reinvoke_if_stale(task_name)
    @rake_wrapper[task_name].reenable if @first_run == false && @project_config_manager.test_defines_changed
    @rake_wrapper[task_name].invoke
  end

end

View File

@@ -0,0 +1,111 @@
require 'ceedling/constants'
require 'ceedling/file_path_utils'
require 'ceedling/version'

desc "Display build environment version info."
task :version do
  puts " Ceedling:: #{Ceedling::Version::CEEDLING}"
  # report version & build info for each vendored tool from its release files
  [
    ['CException', File.join( CEEDLING_VENDOR, CEXCEPTION_ROOT_PATH)],
    [' CMock', File.join( CEEDLING_VENDOR, CMOCK_ROOT_PATH)],
    [' Unity', File.join( CEEDLING_VENDOR, UNITY_ROOT_PATH)],
  ].each do |tool|
    name = tool[0]
    base_path = tool[1]
    # missing info files render as UNKNOWN rather than aborting
    version_string = begin
      @ceedling[:file_wrapper].read( File.join(base_path, 'release', 'version.info') ).strip
    rescue
      "UNKNOWN"
    end
    build_string = begin
      @ceedling[:file_wrapper].read( File.join(base_path, 'release', 'build.info') ).strip
    rescue
      "UNKNOWN"
    end
    puts "#{name}:: #{version_string.empty? ? '#.#.' : (version_string + '.')}#{build_string.empty? ? '?' : build_string}"
  end
end

desc "Set verbose output (silent:[#{Verbosity::SILENT}] - obnoxious:[#{Verbosity::OBNOXIOUS}])."
task :verbosity, :level do |t, args|
  verbosity_level = args.level.to_i

  if (PROJECT_USE_MOCKS)
    # don't store verbosity level in setupinator's config hash, use a copy;
    # otherwise, the input configuration will change and trigger entire project rebuilds
    hash = @ceedling[:setupinator].config_hash[:cmock].clone
    hash[:verbosity] = verbosity_level

    @ceedling[:cmock_builder].manufacture( hash )
  end

  @ceedling[:configurator].project_verbosity = verbosity_level

  # control rake's verbosity with new setting
  verbose( ((verbosity_level >= Verbosity::OBNOXIOUS) ? true : false) )
end

desc "Enable logging"
task :logging do
  @ceedling[:configurator].project_logging = true
end

# non advertised debug task
task :debug do
  Rake::Task[:verbosity].invoke(Verbosity::DEBUG)
  Rake.application.options.trace = true
  @ceedling[:configurator].project_debug = true
end

# non advertised sanity checking task
task :sanity_checks, :level do |t, args|
  check_level = args.level.to_i
  @ceedling[:configurator].sanity_checks = check_level
end

# list expanded environment variables
if (not ENVIRONMENT.empty?)
  desc "List all configured environment variables."
  task :environment do
    env_list = []
    ENVIRONMENT.each do |env|
      env.each_key do |key|
        name = key.to_s.upcase
        env_list.push(" - #{name}: \"#{env[key]}\"")
      end
    end
    env_list.sort.each do |env_line|
      puts env_line
    end
  end
end

namespace :options do
  # create an options:<name> task for each discovered project options file
  COLLECTION_PROJECT_OPTIONS.each do |option_path|
    option = File.basename(option_path, '.yml')

    desc "Merge #{option} project options."
    task option.downcase.to_sym do
      hash = @ceedling[:project_config_manager].merge_options( @ceedling[:setupinator].config_hash, option_path )
      @ceedling[:setupinator].do_setup( hash )
      # release rules only get loaded when a release build is configured
      if @ceedling[:configurator].project_release_build
        load(File.join(CEEDLING_LIB, 'ceedling', 'rules_release.rake'))
      end
    end
  end
end

# do not present task if there's no plugins
if (not PLUGINS_ENABLED.empty?)
  desc "Execute plugin result summaries (no build triggering)."
  task :summary do
    @ceedling[:plugin_manager].summary
    puts "\nNOTE: Summaries may be out of date with project sources.\n\n"
  end
end

View File

@@ -0,0 +1,103 @@
# rather than require 'rake/clean' & try to override, we replicate for finer control
CLEAN = Rake::FileList["**/*~", "**/*.bak"]
CLOBBER = Rake::FileList.new

CLEAN.clear_exclude.exclude { |fn| fn.pathmap("%f") == 'core' && File.directory?(fn) }

CLEAN.include(File.join(PROJECT_TEST_BUILD_OUTPUT_PATH, '*'))
CLEAN.include(File.join(PROJECT_TEST_RESULTS_PATH, '*'))
CLEAN.include(File.join(PROJECT_TEST_DEPENDENCIES_PATH, '*'))
CLEAN.include(File.join(PROJECT_BUILD_RELEASE_ROOT, '*.*'))
CLEAN.include(File.join(PROJECT_RELEASE_BUILD_OUTPUT_PATH, '*'))
CLEAN.include(File.join(PROJECT_RELEASE_DEPENDENCIES_PATH, '*'))

CLOBBER.include(File.join(PROJECT_BUILD_ARTIFACTS_ROOT, '**/*'))
CLOBBER.include(File.join(PROJECT_BUILD_TESTS_ROOT, '**/*'))
CLOBBER.include(File.join(PROJECT_BUILD_RELEASE_ROOT, '**/*'))
CLOBBER.include(File.join(PROJECT_LOG_PATH, '**/*'))
CLOBBER.include(File.join(PROJECT_TEMP_PATH, '**/*'))

# just in case they're using git, let's make sure we allow them to preserve the build directory if desired.
# BUGFIX: the '**/.gitkeep' pattern belongs inside File.join — previously it
# was passed as a second exclude pattern, excluding the tests base path itself
# instead of the .gitkeep marker files beneath it.
CLOBBER.exclude(File.join(TESTS_BASE_PATH, '**/.gitkeep'))

# because of cmock config, mock path can optionally exist apart from standard test build paths
CLOBBER.include(File.join(CMOCK_MOCK_PATH, '*'))

# remove a file/tree, silently ignoring any failure (e.g. file already gone)
REMOVE_FILE_PROC = Proc.new { |fn| rm_r fn rescue nil }

# redefine clean so we can override how it advertises itself
desc "Delete all build artifacts and temporary products."
task(:clean) do
  # because :clean is a prerequisite for :clobber, intelligently display the progress message
  if (not @ceedling[:task_invoker].invoked?(/^clobber$/))
    @ceedling[:streaminator].stdout_puts("\nCleaning build artifacts...\n(For large projects, this task may take a long time to complete)\n\n")
  end
  begin
    CLEAN.each { |fn| REMOVE_FILE_PROC.call(fn) }
  rescue
  end
end

# redefine clobber so we can override how it advertises itself
desc "Delete all generated files (and build artifacts)."
task(:clobber => [:clean]) do
  @ceedling[:streaminator].stdout_puts("\nClobbering all generated files...\n(For large projects, this task may take a long time to complete)\n\n")
  begin
    CLOBBER.each { |fn| REMOVE_FILE_PROC.call(fn) }
  rescue
  end
end

PROJECT_BUILD_PATHS.each { |path| directory(path) }

# create directories that hold build output and generated files & touching rebuild dependency sources
task(:directories => PROJECT_BUILD_PATHS) { @ceedling[:dependinator].touch_force_rebuild_files }

# list paths discovered at load time
namespace :paths do
  paths = @ceedling[:setupinator].config_hash[:paths]
  paths.each_key do |section|
    name = section.to_s.downcase
    path_list = Object.const_get("COLLECTION_PATHS_#{name.upcase}")
    if (path_list.size != 0)
      desc "List all collected #{name} paths."
      task(name.to_sym) { puts "#{name} paths:"; path_list.sort.each {|path| puts " - #{path}" } }
    end
  end
end

# list files & file counts discovered at load time
namespace :files do
  categories = [
    ['test', COLLECTION_ALL_TESTS],
    ['source', COLLECTION_ALL_SOURCE],
    ['header', COLLECTION_ALL_HEADERS]
  ]

  # only offer an assembly listing when either build actually uses assembly
  using_assembly = (defined?(TEST_BUILD_USE_ASSEMBLY) && TEST_BUILD_USE_ASSEMBLY) ||
                   (defined?(RELEASE_BUILD_USE_ASSEMBLY) && RELEASE_BUILD_USE_ASSEMBLY)
  categories << ['assembly', COLLECTION_ALL_ASSEMBLY] if using_assembly

  categories.each do |category|
    name = category[0]
    collection = category[1]

    desc "List all collected #{name} files."
    task(name.to_sym) do
      puts "#{name} files:"
      collection.sort.each { |filepath| puts " - #{filepath}" }
      puts "file count: #{collection.size}"
    end
  end
end

View File

@@ -0,0 +1,30 @@
require 'ceedling/constants'
require 'ceedling/file_path_utils'

desc "Build release target."
task RELEASE_SYM => [:directories] do
  header = "Release build '#{File.basename(PROJECT_RELEASE_BUILD_TARGET)}'"
  @ceedling[:streaminator].stdout_puts("\n\n#{header}\n#{'-' * header.length}")

  begin
    @ceedling[:plugin_manager].pre_release

    core_objects = []
    extra_objects = @ceedling[:file_path_utils].form_release_build_c_objects_filelist( COLLECTION_RELEASE_ARTIFACT_EXTRA_LINK_OBJECTS )

    @ceedling[:project_config_manager].process_release_config_change
    core_objects.concat( @ceedling[:release_invoker].setup_and_invoke_c_objects( COLLECTION_ALL_SOURCE ) )

    # if assembler use isn't enabled, COLLECTION_ALL_ASSEMBLY is empty array & nothing happens
    core_objects.concat( @ceedling[:release_invoker].setup_and_invoke_asm_objects( COLLECTION_ALL_ASSEMBLY ) )

    # if we're using libraries, we need to add those to our collection as well
    # BUGFIX: parenthesize defined? — `defined? X && expr` applies defined? to
    # the whole expression (always truthy), so the guard never protected
    # against an undefined LIBRARIES_RELEASE and would raise NameError.
    library_objects = ((defined? LIBRARIES_RELEASE) && !LIBRARIES_RELEASE.empty?) ? LIBRARIES_RELEASE.flatten.compact : []

    file( PROJECT_RELEASE_BUILD_TARGET => (core_objects + extra_objects + library_objects) )
    Rake::Task[PROJECT_RELEASE_BUILD_TARGET].invoke
  ensure
    # plugins always get post_release, even when the build raised
    @ceedling[:plugin_manager].post_release
  end
end

View File

@@ -0,0 +1,9 @@
require 'ceedling/constants'

# refresh:release — regenerate deep dependency info for the release build
namespace REFRESH_SYM do
  task RELEASE_SYM do
    @ceedling[:release_invoker].refresh_c_deep_dependencies
  end
end

View File

@@ -0,0 +1,60 @@
require 'ceedling/constants'

# bare 'test' is shorthand for 'test:all'
task :test => [:directories] do
  Rake.application['test:all'].invoke
end

namespace TEST_SYM do

  desc "Run all unit tests (also just 'test' works)."
  task :all => [:directories] do
    @ceedling[:test_invoker].setup_and_invoke(COLLECTION_ALL_TESTS)
  end

  desc "Run single test ([*] real test or source file name, no path)."
  task :* do
    # 'test:*' itself is only documentation for the file-name rule; explain usage
    message = "\nOops! '#{TEST_ROOT_NAME}:*' isn't a real task. " +
              "Use a real test or source file name (no path) in place of the wildcard.\n" +
              "Example: rake #{TEST_ROOT_NAME}:foo.c\n\n"

    @ceedling[:streaminator].stdout_puts( message )
  end

  desc "Run tests for changed files."
  task :delta => [:directories] do
    @ceedling[:test_invoker].setup_and_invoke(COLLECTION_ALL_TESTS, TEST_SYM, {:force_run => false})
  end

  desc "Just build tests without running."
  task :build_only => [:directories] do
    @ceedling[:test_invoker].setup_and_invoke(COLLECTION_ALL_TESTS, TEST_SYM, {:build_only => true})
  end

  desc "Run tests by matching regular expression pattern."
  task :pattern, [:regex] => [:directories] do |t, args|
    matches = []

    COLLECTION_ALL_TESTS.each { |test| matches << test if (test =~ /#{args.regex}/) }

    if (matches.size > 0)
      @ceedling[:test_invoker].setup_and_invoke(matches, TEST_SYM, {:force_run => false})
    else
      @ceedling[:streaminator].stdout_puts("\nFound no tests matching pattern /#{args.regex}/.")
    end
  end

  desc "Run tests whose test path contains [dir] or [dir] substring."
  task :path, [:dir] => [:directories] do |t, args|
    matches = []

    # normalize Windows-style separators in the argument before substring matching
    COLLECTION_ALL_TESTS.each { |test| matches << test if File.dirname(test).include?(args.dir.gsub(/\\/, '/')) }

    if (matches.size > 0)
      @ceedling[:test_invoker].setup_and_invoke(matches, TEST_SYM, {:force_run => false})
    else
      @ceedling[:streaminator].stdout_puts("\nFound no tests including the given path or path component.")
    end
  end

end

View File

@@ -0,0 +1,9 @@
require 'ceedling/constants'

# refresh:test -- regenerate the deep-dependency (.d) files for test builds.
namespace REFRESH_SYM do

  task TEST_SYM do
    @ceedling[:test_invoker].refresh_deep_dependencies
  end

end

View File

@@ -0,0 +1,35 @@
require 'ceedling/constants'
require 'ceedling/file_path_utils'

# create file dependencies to ensure C-based components of vendor tools are recompiled when they are updated with new versions
# forming these explicitly rather than depend on auxiliary dependencies so all scenarios are explicitly covered

# Unity is always part of test builds: its object depends on unity.c plus
# both the public and internal headers.
file( @ceedling[:file_path_utils].form_test_build_c_object_filepath( UNITY_C_FILE ) => [
      File.join( UNITY_VENDOR_PATH, UNITY_LIB_PATH, UNITY_C_FILE ),
      File.join( UNITY_VENDOR_PATH, UNITY_LIB_PATH, UNITY_H_FILE ),
      File.join( UNITY_VENDOR_PATH, UNITY_LIB_PATH, UNITY_INTERNALS_H_FILE ) ]
    )

# CMock only participates when mock generation is enabled
if (PROJECT_USE_MOCKS)
  file( @ceedling[:file_path_utils].form_test_build_c_object_filepath( CMOCK_C_FILE ) => [
        File.join( CMOCK_VENDOR_PATH, CMOCK_LIB_PATH, CMOCK_C_FILE ),
        File.join( CMOCK_VENDOR_PATH, CMOCK_LIB_PATH, CMOCK_H_FILE ) ]
      )
end

# CException participates in test builds when exceptions are enabled...
if (PROJECT_USE_EXCEPTIONS)
  file( @ceedling[:file_path_utils].form_test_build_c_object_filepath( CEXCEPTION_C_FILE ) => [
        File.join( CEXCEPTION_VENDOR_PATH, CEXCEPTION_LIB_PATH, CEXCEPTION_C_FILE ),
        File.join( CEXCEPTION_VENDOR_PATH, CEXCEPTION_LIB_PATH, CEXCEPTION_H_FILE ) ]
      )
end

# ...and additionally in release builds when a release target is configured
if (PROJECT_USE_EXCEPTIONS and PROJECT_RELEASE_BUILD)
  file( @ceedling[:file_path_utils].form_release_build_c_object_filepath( CEXCEPTION_C_FILE ) => [
        File.join( CEXCEPTION_VENDOR_PATH, CEXCEPTION_LIB_PATH, CEXCEPTION_C_FILE ),
        File.join( CEXCEPTION_VENDOR_PATH, CEXCEPTION_LIB_PATH, CEXCEPTION_H_FILE ) ]
      )
end

View File

@@ -0,0 +1,85 @@
# Scans test files (or pre-generated YAML include lists) for #include and
# TEST_FILE() directives, recording per-file include lists and the subset of
# includes that are generated mocks.
class TestIncludesExtractor

  constructor :configurator, :yaml_wrapper, :file_wrapper

  def setup
    @includes = {}
    @mocks = {}
  end

  # for includes_list file, slurp up array from yaml file and sort & store includes
  def parse_includes_list(includes_list)
    gather_and_store_includes( includes_list, @yaml_wrapper.load(includes_list) )
  end

  # open, scan for, and sort & store includes of test file
  def parse_test_file(test)
    gather_and_store_includes( test, extract_from_file(test) )
  end

  # mocks with no file extension
  def lookup_raw_mock_list(test)
    @mocks.fetch( form_file_key(test), [] )
  end

  # includes with file extension
  def lookup_includes_list(file)
    @includes.fetch( form_file_key(file), [] )
  end

  private #################################

  # key lookups by basename so either full paths or bare names resolve
  def form_file_key(filepath)
    File.basename(filepath).to_sym
  end

  # Pull the unique include / TEST_FILE names out of a test source file.
  def extract_from_file(file)
    header_extension = @configurator.extension_header

    # strip line comments and block comments before scanning
    contents = @file_wrapper.read(file)
    contents = contents.gsub(/\/\/.*$/, '').gsub(/\/\*.*?\*\//m, '')

    include_pattern   = /#include\s+\"\s*(.+#{'\\' + header_extension})\s*\"/
    test_file_pattern = /TEST_FILE\(\s*\"\s*(.+\.\w+)\s*\"\s*\)/

    found = []
    contents.split("\n").each do |line|
      # an include statement, then a TEST_FILE statement, per line
      [include_pattern, test_file_pattern].each do |pattern|
        hits = line.scan(pattern)
        found << hits[0][0] unless hits.empty?
      end
    end

    found.uniq
  end

  # Record the include list for +file+ and split out any mock headers.
  def gather_and_store_includes(file, includes)
    mock_pattern = /(#{@configurator.cmock_mock_prefix}.+)#{'\\' + @configurator.extension_header}/
    file_key     = form_file_key(file)

    # add includes to lookup hash
    @includes[file_key] = includes

    @mocks[file_key] = []
    includes.each do |include_file|
      # an include whose name matches the mock prefix/extension is a mock
      hits = include_file.scan(mock_pattern)
      @mocks[file_key] << hits[0][0] unless hits.empty?
    end
  end

end

View File

@@ -0,0 +1,188 @@
require 'ceedling/constants'

# Orchestrates the unit-test build pipeline for a set of test files:
# preprocessing, mock generation, runner generation, compilation, linking,
# and execution -- each step delegated through injected collaborators and
# driven by rake task invocations.
class TestInvoker

  # accumulated over a run: involved source files, test files, and mocks
  attr_reader :sources, :tests, :mocks

  constructor :configurator,
              :test_invoker_helper,
              :plugin_manager,
              :streaminator,
              :preprocessinator,
              :task_invoker,
              :dependinator,
              :project_config_manager,
              :build_invoker_utils,
              :file_path_utils,
              :file_wrapper

  def setup
    @sources = []
    @tests = []
    @mocks = []
  end

  # Compiler define derived from the test's basename, e.g. "-DTEST_FOO"
  # (any '@'-suffix is stripped).
  def get_test_definition_str(test)
    return "-D" + File.basename(test, File.extname(test)).upcase.sub(/@.*$/, "")
  end

  # Compiler tool configs that should receive per-test defines, keyed by a
  # human-readable label used only for logging.
  def get_tools_compilers
    tools_compilers = Hash.new
    tools_compilers["for unit test"] = TOOLS_TEST_COMPILER if defined? TOOLS_TEST_COMPILER
    tools_compilers["for gcov"] = TOOLS_GCOV_COMPILER if defined? TOOLS_GCOV_COMPILER
    return tools_compilers
  end

  # Push a test-name define onto each compiler's argument list.
  def add_test_definition(test)
    test_definition_str = get_test_definition_str(test)
    get_tools_compilers.each do |tools_compiler_key, tools_compiler_value|
      tools_compiler_value[:arguments].push("-D#{File.basename(test, ".*").strip.upcase.sub(/@.*$/, "")}")
      @streaminator.stdout_puts("Add the definition value in the build option #{tools_compiler_value[:arguments][-1]} #{tools_compiler_key}", Verbosity::OBNOXIOUS)
    end
  end

  # Remove the define added by add_test_definition; warns when more than one
  # matching define was removed (i.e. it had been duplicated).
  def delete_test_definition(test)
    test_definition_str = get_test_definition_str(test)
    get_tools_compilers.each do |tools_compiler_key, tools_compiler_value|
      num_options = tools_compiler_value[:arguments].size
      @streaminator.stdout_puts("Delete the definition value in the build option #{tools_compiler_value[:arguments][-1]} #{tools_compiler_key}", Verbosity::OBNOXIOUS)
      tools_compiler_value[:arguments].delete_if{|i| i == test_definition_str}
      if num_options > tools_compiler_value[:arguments].size + 1
        @streaminator.stderr_puts("WARNING: duplicated test definition.")
      end
    end
  end

  # Convert libraries configuration form YAML configuration
  # into a string that can be given to the compiler.
  # Returns nil when no :libraries_test key is configured.
  def convert_libraries_to_arguments()
    if @configurator.project_config_hash.has_key?(:libraries_test)
      lib_args = @configurator.project_config_hash[:libraries_test]
      lib_args.flatten!
      lib_flag = @configurator.project_config_hash[:libraries_flag]
      lib_args.map! {|v| lib_flag.gsub(/\$\{1\}/, v) } if (defined? lib_flag)
      return lib_args
    end
  end

  # Build and (optionally) run every test in +tests+.
  # context::  task namespace symbol for error reporting (default TEST_SYM)
  # options::  :force_run re-runs previously passing tests;
  #            :build_only links the executable but skips execution
  def setup_and_invoke(tests, context=TEST_SYM, options={:force_run => true, :build_only => false})

    @tests = tests

    @project_config_manager.process_test_config_change

    @tests.each do |test|
      # announce beginning of test run
      header = "Test '#{File.basename(test)}'"
      @streaminator.stdout_puts("\n\n#{header}\n#{'-' * header.length}")

      begin
        @plugin_manager.pre_test( test )
        test_name ="#{File.basename(test)}".chomp('.c')
        def_test_key="defines_#{test_name.downcase}"
        # NOTE(review): def_test_key / defs_bkp / orig_path / mock_list /
        # sources are assigned inside this begin block but read again in the
        # ensure block and after it -- an exception raised before assignment
        # (e.g. in pre_test) would surface as NameError there. Confirm
        # process_exception always re-raises in that situation.

        # Re-define the project out path and pre-processor defines.
        if @configurator.project_config_hash.has_key?(def_test_key.to_sym)
          @project_config_manager.test_config_changed
          defs_bkp = Array.new(COLLECTION_DEFINES_TEST_AND_VENDOR)
          printf " ************** Specific test definitions for #{test_name} !!! \n"
          tst_defs_cfg = @configurator.project_config_hash[def_test_key.to_sym]
          orig_path = @configurator.project_test_build_output_path
          # redirect build output into a per-test subdirectory
          @configurator.project_config_hash[:project_test_build_output_path] = File.join(@configurator.project_test_build_output_path, test_name)
          @file_wrapper.mkdir(@configurator.project_test_build_output_path)
          COLLECTION_DEFINES_TEST_AND_VENDOR.replace(tst_defs_cfg)
          # printf " * new defines = #{COLLECTION_DEFINES_TEST_AND_VENDOR}\n"
        end

        # collect up test fixture pieces & parts
        runner       = @file_path_utils.form_runner_filepath_from_test( test )
        mock_list    = @preprocessinator.preprocess_test_and_invoke_test_mocks( test )
        sources      = @test_invoker_helper.extract_sources( test )
        extras       = @configurator.collection_test_fixture_extra_link_objects
        core         = [test] + mock_list + sources
        objects      = @file_path_utils.form_test_build_objects_filelist( [runner] + core + extras )
        results_pass = @file_path_utils.form_pass_results_filepath( test )
        results_fail = @file_path_utils.form_fail_results_filepath( test )

        @project_config_manager.process_test_defines_change(sources)

        # add the definition value in the build option for the unit test
        if @configurator.defines_use_test_definition
          add_test_definition(test)
        end

        # clean results files so we have a missing file with which to kick off rake's dependency rules
        @test_invoker_helper.clean_results( {:pass => results_pass, :fail => results_fail}, options )

        # load up auxiliary dependencies so deep changes cause rebuilding appropriately
        @test_invoker_helper.process_deep_dependencies( core ) do |dependencies_list|
          @dependinator.load_test_object_deep_dependencies( dependencies_list )
        end

        # tell rake to create test runner if needed
        @task_invoker.invoke_test_runner( runner )

        # enhance object file dependencies to capture externalities influencing regeneration
        @dependinator.enhance_test_build_object_dependencies( objects )

        # associate object files with executable
        @dependinator.setup_test_executable_dependencies( test, objects )

        # build test objects
        @task_invoker.invoke_test_objects( objects )

        # if the option build_only has been specified, build only the executable
        # but don't run the test
        if (options[:build_only])
          executable = @file_path_utils.form_test_executable_filepath( test )
          @task_invoker.invoke_test_executable( executable )
        else
          # 3, 2, 1... launch
          @task_invoker.invoke_test_results( results_pass )
        end
      rescue => e
        @build_invoker_utils.process_exception( e, context )
      ensure
        # delete the definition value in the build option for the unit test
        if @configurator.defines_use_test_definition
          delete_test_definition(test)
        end
        @plugin_manager.post_test( test )
        # restore the project test defines
        if @configurator.project_config_hash.has_key?(def_test_key.to_sym)
          # @configurator.project_config_hash[:defines_test] =
          COLLECTION_DEFINES_TEST_AND_VENDOR.replace(defs_bkp)
          # printf " ---- Restored defines at #{defs_bkp}"
          @configurator.project_config_hash[:project_test_build_output_path] = orig_path
          printf " ************** Restored defines and build path\n"
        end
      end

      # store away what's been processed
      @mocks.concat( mock_list )
      @sources.concat( sources )

      @task_invoker.first_run = false
    end

    # post-process collected mock list
    @mocks.uniq!
    # post-process collected sources list
    @sources.uniq!
  end

  # Delete every test .d dependency file and regenerate dependencies for all
  # tests and sources in the project.
  def refresh_deep_dependencies
    @file_wrapper.rm_f(
      @file_wrapper.directory_listing(
        File.join( @configurator.project_test_dependencies_path, '*' + @configurator.extension_dependencies ) ) )

    @test_invoker_helper.process_deep_dependencies(
      @configurator.collection_all_tests + @configurator.collection_all_source )
  end

end

View File

@@ -0,0 +1,32 @@
# Small collaborator of TestInvoker: results cleanup, deep-dependency
# generation, and resolution of a test's includes to buildable sources.
class TestInvokerHelper

  constructor :configurator, :task_invoker, :test_includes_extractor, :file_finder, :file_path_utils, :file_wrapper

  # Remove stale result files so rake's file dependencies re-run the test.
  # The pass-results file is only removed when the run is being forced.
  def clean_results(results, options)
    @file_wrapper.rm_f( results[:fail] )
    @file_wrapper.rm_f( results[:pass] ) if options[:force_run]
  end

  # Generate (when configured) the deep-dependency files for +files+ and
  # yield the list to the caller. No-op unless deep dependencies are enabled.
  def process_deep_dependencies(files)
    return unless @configurator.project_use_deep_dependencies

    dependencies_list = @file_path_utils.form_test_dependencies_filelist( files )

    @task_invoker.invoke_test_dependencies_files( dependencies_list ) if @configurator.project_generate_deep_dependencies

    yield( dependencies_list ) if block_given?
  end

  # Map each #include of +test+ to its compilation input file, silently
  # dropping headers with no corresponding source.
  def extract_sources(test)
    includes = @test_includes_extractor.lookup_includes_list(test)
    includes.map { |include| @file_finder.find_compilation_input_file(include, :ignore) }.compact
  end

end

View File

@@ -0,0 +1,229 @@
require 'ceedling/constants'
require 'benchmark'
# Raised when a shelled-out tool exits non-zero and the caller asked for
# failures to be fatal (:boom option). Carries the complete shell result
# hash so reporting code can show output, exit code, and timing.
class ShellExecutionException < RuntimeError
  # result hash of the failed command (:output, :exit_code, :time, ...)
  attr_reader :shell_result

  # shell_result:: hash describing the failed invocation's results
  def initialize(shell_result)
    @shell_result = shell_result
  end
end
# Builds shell command lines from YAML tool configurations (expanding '$'
# substitution operators, inline ruby, and global constants) and executes
# them, capturing output / exit status.
class ToolExecutor

  constructor :configurator, :tool_executor_helper, :streaminator, :system_wrapper

  def setup
    @tool_name = ''
    @executable = ''
  end

  # build up a command line from yaml provided config
  # @param extra_params is an array of parameters to append to executable
  # Returns a hash: :line is the full command string; :options carries the
  # stderr-redirect and background-execution settings consumed by exec().
  def build_command_line(tool_config, extra_params, *args)
    @tool_name = tool_config[:name]
    @executable = tool_config[:executable]

    command = {}

    # basic premise is to iterate top to bottom through arguments using '$' as
    # a string replacement indicator to expand globals or inline yaml arrays
    # into command line arguments via substitution strings

    # executable must be quoted if it includes spaces (common on windows)
    executable = @tool_executor_helper.osify_path_separators( expandify_element(@executable, *args) )
    executable = "\"#{executable}\"" if executable.include?(' ')

    command[:line] = [
      executable,
      extra_params.join(' ').strip,
      build_arguments(tool_config[:arguments], *args),
      ].reject{|s| s.nil? || s.empty?}.join(' ').strip

    command[:options] = {
      :stderr_redirect => @tool_executor_helper.stderr_redirection(tool_config, @configurator.project_logging),
      :background_exec => tool_config[:background_exec]
      }

    return command
  end

  # shell out, execute command, and return response
  # options:: :boom raises ShellExecutionException on non-zero exit;
  #           :stderr_redirect / :background_exec control shell decorations
  def exec(command, options={}, args=[])
    options[:boom] = true if (options[:boom].nil?)
    options[:stderr_redirect] = StdErrRedirect::NONE if (options[:stderr_redirect].nil?)
    options[:background_exec] = BackgroundExec::NONE if (options[:background_exec].nil?)

    # build command line
    command_line = [
      @tool_executor_helper.background_exec_cmdline_prepend( options ),
      command.strip,
      args,
      @tool_executor_helper.stderr_redirect_cmdline_append( options ),
      @tool_executor_helper.background_exec_cmdline_append( options ),
      ].flatten.compact.join(' ')

    @streaminator.stderr_puts("Verbose: #{__method__.to_s}(): #{command_line}", Verbosity::DEBUG)

    shell_result = {}

    # depending on background exec option, we shell out differently
    time = Benchmark.realtime do
      if (options[:background_exec] != BackgroundExec::NONE)
        shell_result = @system_wrapper.shell_system( command_line, options[:boom] )
      else
        shell_result = @system_wrapper.shell_backticks( command_line, options[:boom] )
      end
    end
    shell_result[:time] = time

    #scrub the string for illegal output
    unless shell_result[:output].nil?
      shell_result[:output] = shell_result[:output].scrub if "".respond_to?(:scrub)
      shell_result[:output].gsub!(/\033\[\d\dm/,'')   # strip two-digit ANSI color escapes
    end

    @tool_executor_helper.print_happy_results( command_line, shell_result, options[:boom] )
    @tool_executor_helper.print_error_results( command_line, shell_result, options[:boom] )

    # go boom if exit code isn't 0 (but in some cases we don't want a non-0 exit code to raise)
    raise ShellExecutionException.new(shell_result) if ((shell_result[:exit_code] != 0) and options[:boom])

    return shell_result
  end


  private #############################


  # Expand a tool's YAML :arguments array into one command-line string.
  # Returns nil when config is nil or expansion produced nothing.
  def build_arguments(config, *args)
    build_string = ''

    return nil if (config.nil?)

    # iterate through each argument

    # the yaml blob array needs to be flattened so that yaml substitution
    # is handled correctly, since it creates a nested array when an anchor is
    # dereferenced
    config.flatten.each do |element|
      argument = ''
      case(element)
        # if we find a simple string then look for string replacement operators
        # and expand with the parameters in this method's argument list
        when String then argument = expandify_element(element, *args)
        # if we find a hash, then we grab the key as a substitution string and expand the
        # hash's value(s) within that substitution string
        when Hash then argument = dehashify_argument_elements(element)
      end
      build_string.concat("#{argument} ") if (argument.length > 0)
    end

    build_string.strip!
    return build_string if (build_string.length > 0)
    return nil
  end

  # handle simple text string argument & argument array string replacement operators
  # NOTE(review): mutates +element+ in place (sub!/strip!/replace) and
  # un-escapes only the FIRST '\$' occurrence -- confirm multiple escaped
  # '$'s per argument are not expected.
  def expandify_element(element, *args)
    match = //
    to_process = nil
    args_index = 0

    # handle ${#} input replacement
    if (element =~ TOOL_EXECUTOR_ARGUMENT_REPLACEMENT_PATTERN)
      args_index = ($2.to_i - 1)

      if (args.nil? or args[args_index].nil?)
        @streaminator.stderr_puts("ERROR: Tool '#{@tool_name}' expected valid argument data to accompany replacement operator #{$1}.", Verbosity::ERRORS)
        raise
      end

      match = /#{Regexp.escape($1)}/
      to_process = args[args_index]
    end

    # simple string argument: replace escaped '\$' and strip
    element.sub!(/\\\$/, '$')
    element.strip!

    # handle inline ruby execution
    if (element =~ RUBY_EVAL_REPLACEMENT_PATTERN)
      element.replace(eval($1))
    end

    build_string = ''

    # handle array or anything else passed into method to be expanded in place of replacement operators
    case (to_process)
      when Array then to_process.each {|value| build_string.concat( "#{element.sub(match, value.to_s)} " ) } if (to_process.size > 0)
      else build_string.concat( element.sub(match, to_process.to_s) )
    end

    # handle inline ruby string substitution
    if (build_string =~ RUBY_STRING_REPLACEMENT_PATTERN)
      build_string.replace(@system_wrapper.module_eval(build_string))
    end

    return build_string.strip
  end

  # handle argument hash: keys are substitution strings, values are data to be expanded within substitution strings
  def dehashify_argument_elements(hash)
    build_string = ''
    elements = []

    # grab the substitution string (hash key)
    substitution = hash.keys[0].to_s
    # grab the string(s) to squirt into the substitution string (hash value)
    expand = hash[hash.keys[0]]

    if (expand.nil?)
      @streaminator.stderr_puts("ERROR: Tool '#{@tool_name}' could not expand nil elements for substitution string '#{substitution}'.", Verbosity::ERRORS)
      raise
    end

    # array-ify expansion input if only a single string
    expansion = ((expand.class == String) ? [expand] : expand)

    expansion.each do |item|
      # code eval substitution
      if (item =~ RUBY_EVAL_REPLACEMENT_PATTERN)
        elements << eval($1)
      # string eval substitution
      elsif (item =~ RUBY_STRING_REPLACEMENT_PATTERN)
        elements << @system_wrapper.module_eval(item)
      # global constants
      elsif (@system_wrapper.constants_include?(item))
        const = Object.const_get(item)
        if (const.nil?)
          @streaminator.stderr_puts("ERROR: Tool '#{@tool_name}' found constant '#{item}' to be nil.", Verbosity::ERRORS)
          raise
        else
          elements << const
        end
      elsif (item.class == Array)
        elements << item
      elsif (item.class == String)
        @streaminator.stderr_puts("ERROR: Tool '#{@tool_name}' cannot expand nonexistent value '#{item}' for substitution string '#{substitution}'.", Verbosity::ERRORS)
        raise
      else
        @streaminator.stderr_puts("ERROR: Tool '#{@tool_name}' cannot expand value having type '#{item.class}' for substitution string '#{substitution}'.", Verbosity::ERRORS)
        raise
      end
    end

    # expand elements (whether string or array) into substitution string & replace escaped '\$'
    elements.flatten!
    elements.each do |element|
      build_string.concat( substitution.sub(/([^\\]*)\$/, "\\1#{element}") ) # don't replace escaped '\$' but allow us to replace just a lonesome '$'
      build_string.gsub!(/\\\$/, '$')
      build_string.concat(' ')
    end

    return build_string.strip
  end

end

View File

@@ -0,0 +1,164 @@
require 'ceedling/constants' # for Verbosity enumeration & $stderr redirect enumeration

##
# Helper functions for the tool executor
class ToolExecutorHelper

  constructor :streaminator, :system_utils, :system_wrapper

  ##
  # Returns the stderr redirection based on the config and logging.
  # ==== Attributes
  #
  # * _tool_config_: A hash containing config information.
  # * _logging_: A boolean representing if logging is enabled or not.
  #
  def stderr_redirection(tool_config, logging)
    # if there's no logging enabled, return :stderr_redirect unmodified
    return tool_config[:stderr_redirect] if (not logging)

    # if there is logging enabled but the redirect is a custom value (not enum), return the custom string
    return tool_config[:stderr_redirect] if (tool_config[:stderr_redirect].class == String)

    # if logging is enabled but there's no custom string, return the AUTO enumeration so $stderr goes into the log
    return StdErrRedirect::AUTO
  end

  ##
  # Returns the background execution prepend based on the config.
  # ==== Attributes
  #
  # * _tool_config_: A hash containing config information.
  #   NOTE(review): ToolExecutor#exec passes its options hash here, which
  #   carries the same :background_exec key -- the parameter name is historical.
  #
  def background_exec_cmdline_prepend(tool_config)
    return nil if (tool_config.nil? || tool_config[:background_exec].nil?)

    config_exec = tool_config[:background_exec]

    # windows' 'start' launches the command as a detached process
    if ((config_exec == BackgroundExec::AUTO) and (@system_wrapper.windows?))
      return 'start'
    end

    if (config_exec == BackgroundExec::WIN)
      return 'start'
    end

    return nil
  end

  ##
  # Modifies an executables path based on platform.
  # ==== Attributes
  #
  # * _executable_: The executable's path.
  #
  def osify_path_separators(executable)
    return executable.gsub(/\//, '\\') if (@system_wrapper.windows?)
    return executable
  end

  ##
  # Returns the stderr redirect append based on the config.
  # ==== Attributes
  #
  # * _tool_config_: A hash containing config information.
  #
  # NOTE(review): only AUTO is resolved to a platform redirect below; an
  # explicit non-AUTO enum (e.g. WIN/UNIX set directly in config) leaves
  # +redirect+ at NONE and so returns nil -- confirm this is intended.
  def stderr_redirect_cmdline_append(tool_config)
    return nil if (tool_config.nil? || tool_config[:stderr_redirect].nil?)

    config_redirect = tool_config[:stderr_redirect]
    redirect        = StdErrRedirect::NONE

    if (config_redirect == StdErrRedirect::AUTO)
      if (@system_wrapper.windows?)
        redirect = StdErrRedirect::WIN
      elsif (@system_utils.tcsh_shell?)
        redirect = StdErrRedirect::TCSH
      else
        redirect = StdErrRedirect::UNIX
      end
    end

    case redirect
      # we may need more complicated processing after some learning with various environments
      when StdErrRedirect::NONE then nil
      when StdErrRedirect::WIN  then '2>&1'
      when StdErrRedirect::UNIX then '2>&1'
      when StdErrRedirect::TCSH then '|&'
      else redirect.to_s
    end
  end

  ##
  # Returns the background execution append based on the config.
  # ==== Attributes
  #
  # * _tool_config_: A hash containing config information.
  #
  def background_exec_cmdline_append(tool_config)
    return nil if (tool_config.nil? || tool_config[:background_exec].nil?)

    config_exec = tool_config[:background_exec]

    # if :auto & windows, then we already prepended 'start' and should append nothing
    return nil if ((config_exec == BackgroundExec::AUTO) and (@system_wrapper.windows?))

    # if :auto & not windows, then we append standard '&'
    return '&' if ((config_exec == BackgroundExec::AUTO) and (not @system_wrapper.windows?))

    # if explicitly Unix, then append '&'
    return '&' if (config_exec == BackgroundExec::UNIX)

    # all other cases, including :none, :win, & anything unrecognized, append nothing
    return nil
  end

  ##
  # Outputs success results if command succeeded and we have verbosity cranked up.
  # ==== Attributes
  #
  # * _command_str_: The command ran.
  # * _shell_results_: The outputs of the command including exit code and
  #                    output.
  # * _boom_: A boolean representing if a non zero result is erroneous.
  #
  def print_happy_results(command_str, shell_result, boom=true)
    if ((shell_result[:exit_code] == 0) or ((shell_result[:exit_code] != 0) and not boom))
      output =  "> Shell executed command:\n"
      output += "'#{command_str}'\n"
      output += "> Produced output:\n"                     if (not shell_result[:output].empty?)
      output += "#{shell_result[:output].strip}\n"         if (not shell_result[:output].empty?)
      output += "> And exited with status: [#{shell_result[:exit_code]}].\n" if (shell_result[:exit_code] != 0)
      output += "\n"

      @streaminator.stdout_puts(output, Verbosity::OBNOXIOUS)
    end
  end

  ##
  # Outputs failures results if command failed and we have verbosity set to minimum error level.
  # ==== Attributes
  #
  # * _command_str_: The command ran.
  # * _shell_results_: The outputs of the command including exit code and
  #                    output.
  # * _boom_: A boolean representing if a non zero result is erroneous.
  #
  def print_error_results(command_str, shell_result, boom=true)
    if ((shell_result[:exit_code] != 0) and boom)
      output =  "ERROR: Shell command failed.\n"
      output += "> Shell executed command:\n"
      output += "'#{command_str}'\n"
      output += "> Produced output:\n"                     if (not shell_result[:output].empty?)
      output += "#{shell_result[:output].strip}\n"         if (not shell_result[:output].empty?)
      output += "> And exited with status: [#{shell_result[:exit_code]}].\n" if (shell_result[:exit_code] != nil)
      output += "> And then likely crashed.\n"             if (shell_result[:exit_code] == nil)
      output += "\n"

      @streaminator.stderr_puts(output, Verbosity::ERRORS)
    end
  end
end

View File

@@ -0,0 +1,10 @@
# Gatekeeper for console output: compares a message's verbosity level
# against the project's configured verbosity threshold.
class Verbosinator

  constructor :configurator

  # True when a message at +level+ is at or below the configured threshold.
  def should_output?(level)
    level <= @configurator.project_verbosity
  end

end

View File

@@ -0,0 +1,36 @@
# @private
module Ceedling
  module Version
    # Check for local or global version of vendor directory in order to look up versions
    {
      "CEXCEPTION" => File.join("vendor","c_exception","lib","CException.h"),
      "CMOCK"      => File.join("vendor","cmock","src","cmock.h"),
      "UNITY"      => File.join("vendor","unity","src","unity.h"),
    }.each_pair do |name, path|
      filename = if (File.exist?(File.join("..","..",path)))
                   File.join("..","..",path)
                 elsif (File.exist?(File.join(File.dirname(__FILE__),"..","..",path)))
                   File.join(File.dirname(__FILE__),"..","..",path)
                 else
                   # Vendor component not present: record 'unknown' and move on.
                   # BUGFIX: was `continue`, which is not a Ruby keyword and
                   # raised NoMethodError whenever this branch executed.
                   eval "#{name} = 'unknown'"
                   next
                 end

      # Actually look up the versions
      a = [0,0,0]
      # BUGFIX: File.readlines takes no block -- the original passed one,
      # which was silently ignored, leaving every version at 0.0.0.
      File.readlines(filename).each do |line|
        ["VERSION_MAJOR", "VERSION_MINOR", "VERSION_BUILD"].each_with_index do |field, i|
          m = line.match(/#{name}_#{field}\s+(\d+)/)
          a[i] = m[1] unless (m.nil?)
        end
      end

      # Make a constant from each, so that we can use it elsewhere
      eval "#{name} = '#{a.join(".")}'"
    end

    GEM = "0.29.0"
    CEEDLING = GEM
  end
end

View File

@@ -0,0 +1,15 @@
# @private
# NOTE: this file is an ERB template -- the <%= %> tags are filled in with
# the vendor component versions discovered at gem-packaging time; it is not
# valid Ruby until rendered.
# NOTE(review): GEM here ("0.27.0") lags the "0.29.0" in version.rb --
# confirm whether this template is intentionally pinned or stale.
module Ceedling
  module Version
    # @private
    GEM = "0.27.0"
    # @private
    CEEDLING = "<%= versions["CEEDLING"] %>"
    # @private
    CEXCEPTION = "<%= versions["CEXCEPTION"] %>"
    # @private
    CMOCK = "<%= versions["CMOCK"] %>"
    # @private
    UNITY = "<%= versions["UNITY"] %>"
  end
end

View File

@@ -0,0 +1,17 @@
require 'yaml'
require 'erb'
# Thin seam around YAML serialization. Project YAML files are run through
# ERB before parsing, so they may embed Ruby snippets (<%= ... %>).
class YamlWrapper

  # Read +filepath+, expand any ERB within it, and parse the result as YAML.
  def load(filepath)
    source = ERB.new(File.read(filepath)).result
    YAML.load(source)
  end

  # Serialize +structure+ as YAML into +filepath+, overwriting any contents.
  def dump(filepath, structure)
    File.open(filepath, 'w') { |output| YAML.dump(structure, output) }
  end

end

View File

@@ -0,0 +1,22 @@
ceedling-beep
=============
This is a simple plugin that just beeps at the end of a build and/or test sequence. Are you getting too distracted surfing
the internet, chatting with coworkers, or swordfighting while it's building or testing? The friendly beep will let you know
it's time to pay attention again.
This plugin has very few configuration options. At this time it can beep on completion of a task and/or on an error condition.
For each of these, you can configure the method it should use to beep.
```
:tools:
:beep_on_done: :bell
:beep_on_error: :bell
```
Each of these has the following options:
- :bell - this option writes the ASCII bell character to stdout
- :speaker_test - this uses the linux speaker-test command if installed
Very likely, we'll be adding to this list if people find this to be useful.

View File

@@ -0,0 +1,40 @@
require 'ceedling/plugin'
require 'ceedling/constants'
# Plugin that emits an audible cue when a build/test sequence completes or
# errors, so you notice without watching the terminal.
class Beep < Plugin

  attr_reader :config

  # Pull beep methods from tool configuration, defaulting to the ASCII bell.
  def setup
    @config = {
      :on_done  => ((defined? TOOLS_BEEP_ON_DONE)  ? TOOLS_BEEP_ON_DONE  : :bell),
      :on_error => ((defined? TOOLS_BEEP_ON_ERROR) ? TOOLS_BEEP_ON_ERROR : :bell),
    }
  end

  def post_build
    beep @config[:on_done]
  end

  def post_error
    beep @config[:on_error]
  end

  private

  # Perform the configured beep; unrecognized methods (including :none)
  # intentionally do nothing.
  def beep(method = :none)
    if method == :bell
      # emit the ASCII bell character (different echo flags per platform)
      puts(SystemWrapper.windows? ? "echo '\007'" : "echo -ne '\007'")
    elsif method == :speaker_test
      `speaker-test -t sine -f 1000 -l 1`
    end
  end

end

View File

@@ -0,0 +1,15 @@
% function_string = hash[:coverage][:functions].to_s
% branch_string = hash[:coverage][:branches].to_s
% # pad both percentages to a common width so the summary columns line up
% format_string = "%#{[function_string.length, branch_string.length].max}i"
<%=@ceedling[:plugin_reportinator].generate_banner("#{hash[:header]}: CODE COVERAGE SUMMARY")%>
% if (!hash[:coverage][:functions].nil?)
FUNCTIONS: <%=sprintf(format_string, hash[:coverage][:functions])%>%
% else
FUNCTIONS: none
% end
% if (!hash[:coverage][:branches].nil?)
BRANCHES: <%=sprintf(format_string, hash[:coverage][:branches])%>%
% else
BRANCHES: none
% end

View File

@@ -0,0 +1,169 @@
directory(BULLSEYE_BUILD_OUTPUT_PATH)
directory(BULLSEYE_RESULTS_PATH)
directory(BULLSEYE_ARTIFACTS_PATH)
directory(BULLSEYE_DEPENDENCIES_PATH)

# scrub per-build products on 'clean'; wipe the whole build tree on 'clobber'
CLEAN.include(File.join(BULLSEYE_BUILD_OUTPUT_PATH, '*'))
CLEAN.include(File.join(BULLSEYE_RESULTS_PATH, '*'))
CLEAN.include(File.join(BULLSEYE_DEPENDENCIES_PATH, '*'))

CLOBBER.include(File.join(BULLSEYE_BUILD_PATH, '**/*'))

# default install location of the BullseyeCoverage libraries on Windows
PLUGINS_BULLSEYE_LIB_PATH = 'C:\\tools\\BullseyeCoverage\\lib' if not defined?(PLUGINS_BULLSEYE_LIB_PATH)

# Compile objects into the bullseye output directory. Test files, mocks, and
# explicitly ignored sources are compiled WITHOUT coverage instrumentation;
# everything else goes through the coverage-instrumented path.
rule(/#{BULLSEYE_BUILD_OUTPUT_PATH}\/#{'.+\\'+EXTENSION_OBJECT}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name)
    end
  ]) do |object|

  if File.basename(object.source) =~ /^(#{PROJECT_TEST_FILE_PREFIX}|#{CMOCK_MOCK_PREFIX}|#{BULLSEYE_IGNORE_SOURCES.join('|')})/i
    @ceedling[:generator].generate_object_file(
      TOOLS_BULLSEYE_COMPILER,
      OPERATION_COMPILE_SYM,
      BULLSEYE_SYM,
      object.source,
      object.name,
      @ceedling[:file_path_utils].form_test_build_list_filepath(object.name)
      )
  else
    @ceedling[BULLSEYE_SYM].generate_coverage_object_file(object.source, object.name)
  end

end

# link test executables inside the bullseye build tree
rule(/#{BULLSEYE_BUILD_OUTPUT_PATH}\/#{'.+\\'+EXTENSION_EXECUTABLE}$/) do |bin_file|
  @ceedling[:generator].generate_executable_file(
    TOOLS_BULLSEYE_LINKER,
    BULLSEYE_SYM,
    bin_file.prerequisites,
    bin_file.name,
    @ceedling[:file_path_utils].form_test_build_map_filepath(bin_file.name)
    )
end

# run a test executable to produce its pass-results file
rule(/#{BULLSEYE_RESULTS_PATH}\/#{'.+\\'+EXTENSION_TESTPASS}$/ => [
    proc do |task_name|
      @ceedling[:file_path_utils].form_test_executable_filepath(task_name)
    end
  ]) do |test_result|
  @ceedling[:generator].generate_test_results(TOOLS_BULLSEYE_FIXTURE, BULLSEYE_SYM, test_result.source, test_result.name)
end

# generate deep-dependency (.d) files for sources in the bullseye build
rule(/#{BULLSEYE_DEPENDENCIES_PATH}\/#{'.+\\'+EXTENSION_DEPENDENCIES}$/ => [
    proc do |task_name|
      @ceedling[:file_finder].find_compilation_input_file(task_name)
    end
  ]) do |dep|
  @ceedling[:generator].generate_dependencies_file(
    TOOLS_TEST_DEPENDENCIES_GENERATOR,
    BULLSEYE_SYM,
    dep.source,
    File.join(BULLSEYE_BUILD_OUTPUT_PATH, File.basename(dep.source).ext(EXTENSION_OBJECT) ),
    dep.name
    )
end

# ensure the bullseye output directories exist before any build work
task :directories => [BULLSEYE_BUILD_OUTPUT_PATH, BULLSEYE_RESULTS_PATH, BULLSEYE_DEPENDENCIES_PATH, BULLSEYE_ARTIFACTS_PATH]
namespace BULLSEYE_SYM do

  # convenience task mapping every project source to its bullseye object
  task source_coverage: COLLECTION_ALL_SOURCE.pathmap("#{BULLSEYE_BUILD_OUTPUT_PATH}/%n#{@ceedling[:configurator].extension_object}")

  desc 'Run code coverage for all tests'
  task all: [:directories] do
    # swap in the plugin's flattened config, run under coverage, then restore
    @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
    @ceedling[BULLSEYE_SYM].enableBullseye(true)
    @ceedling[:test_invoker].setup_and_invoke(COLLECTION_ALL_TESTS, BULLSEYE_SYM)
    @ceedling[:configurator].restore_config
  end

  desc "Run single test w/ coverage ([*] real test or source file name, no path)."
  task :* do
    message = "\nOops! '#{BULLSEYE_ROOT_NAME}:*' isn't a real task. " +
              "Use a real test or source file name (no path) in place of the wildcard.\n" +
              "Example: rake #{BULLSEYE_ROOT_NAME}:foo.c\n\n"

    @ceedling[:streaminator].stdout_puts( message )
  end

  desc 'Run tests by matching regular expression pattern.'
  task :pattern, [:regex] => [:directories] do |_t, args|
    matches = []

    COLLECTION_ALL_TESTS.each do |test|
      matches << test if test =~ /#{args.regex}/
    end

    if !matches.empty?
      @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
      @ceedling[BULLSEYE_SYM].enableBullseye(true)
      @ceedling[:test_invoker].setup_and_invoke(matches, BULLSEYE_SYM, force_run: false)
      @ceedling[:configurator].restore_config
    else
      @ceedling[:streaminator].stdout_puts("\nFound no tests matching pattern /#{args.regex}/.")
    end
  end

  desc 'Run tests whose test path contains [dir] or [dir] substring.'
  task :path, [:dir] => [:directories] do |_t, args|
    matches = []

    # normalize Windows-style separators before comparing directory names
    COLLECTION_ALL_TESTS.each do |test|
      matches << test if File.dirname(test).include?(args.dir.tr('\\', '/'))
    end

    if !matches.empty?
      @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
      @ceedling[BULLSEYE_SYM].enableBullseye(true)
      @ceedling[:test_invoker].setup_and_invoke(matches, BULLSEYE_SYM, force_run: false)
      @ceedling[:configurator].restore_config
    else
      @ceedling[:streaminator].stdout_puts("\nFound no tests including the given path or path component.")
    end
  end

  desc 'Run code coverage for changed files'
  task delta: [:directories] do
    @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
    @ceedling[BULLSEYE_SYM].enableBullseye(true)
    @ceedling[:test_invoker].setup_and_invoke(COLLECTION_ALL_TESTS, BULLSEYE_SYM, {:force_run => false})
    @ceedling[:configurator].restore_config
  end

  # use a rule to increase efficiency for large projects
  # bullseye test tasks by regex
  rule(/^#{BULLSEYE_TASK_ROOT}\S+$/ => [
      proc do |task_name|
        # resolve 'bullseye:<name>' to a test file, adding the test prefix if absent
        test = task_name.sub(/#{BULLSEYE_TASK_ROOT}/, '')
        test = "#{PROJECT_TEST_FILE_PREFIX}#{test}" unless test.start_with?(PROJECT_TEST_FILE_PREFIX)
        @ceedling[:file_finder].find_test_from_file_path(test)
      end
    ]) do |test|
    @ceedling[:rake_wrapper][:directories].invoke
    @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
    @ceedling[BULLSEYE_SYM].enableBullseye(true)
    @ceedling[:test_invoker].setup_and_invoke([test.source], BULLSEYE_SYM)
    @ceedling[:configurator].restore_config
  end

end
# refresh:bullseye -- regenerate deep dependencies under the bullseye config
# (only defined when the project enables deep-dependency tracking).
if PROJECT_USE_DEEP_DEPENDENCIES
  namespace REFRESH_SYM do
    task BULLSEYE_SYM do
      @ceedling[:configurator].replace_flattened_config(@ceedling[BULLSEYE_SYM].config)
      @ceedling[BULLSEYE_SYM].enableBullseye(true)
      @ceedling[:test_invoker].refresh_deep_dependencies
      @ceedling[:configurator].restore_config
    end
  end
end
# utils:bullseye -- launch the Bullseye CoverageBrowser GUI against the COVFILE.
namespace UTILS_SYM do

  desc "Open Bullseye code coverage browser"
  task BULLSEYE_SYM do
    command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_BROWSER, [])
    @ceedling[:tool_executor].exec(command[:line], command[:options])
  end

end

View File

@@ -0,0 +1,57 @@
---
# Default configuration for the Bullseye code-coverage plugin.

:bullseye:
  # When TRUE, the plugin pushes/pops the Bullseye enable state around runs
  # (see Bullseye#enableBullseye).
  :auto_license: TRUE

:plugins:
  # Directories searched for the Bullseye run-time library when linking.
  :bullseye_lib_path: []

:paths:
  # Bullseye toolchain include directories.
  :bullseye_toolchain_include: []

:tools:
  # covc wraps the full compiler command line (${1}) to instrument the build.
  :bullseye_instrumentation:
    :executable: covc
    :arguments:
      - '--file $': ENVIRONMENT_COVFILE
      - -q
      - ${1}
  # Compiler used for coverage builds; ${1} = source file, ${2} = object file.
  :bullseye_compiler:
    :executable: gcc
    :arguments:
      - -g
      - -I"$": COLLECTION_PATHS_TEST_SUPPORT_SOURCE_INCLUDE_VENDOR
      - -I"$": COLLECTION_PATHS_BULLSEYE_TOOLCHAIN_INCLUDE
      - -D$: COLLECTION_DEFINES_TEST_AND_VENDOR
      - -DBULLSEYE_COMPILER
      - -c "${1}"
      - -o "${2}"
  # Linker for coverage builds; links against the Bullseye run-time (-lcov).
  :bullseye_linker:
    :executable: gcc
    :arguments:
      - ${1}
      - -o ${2}
      - -L$: PLUGINS_BULLSEYE_LIB_PATH
      - -lcov
  # Runs a coverage-instrumented test executable (${1} = executable path).
  :bullseye_fixture:
    :executable: ${1}
  # Whole-project coverage summary report.
  :bullseye_report_covsrc:
    :executable: covsrc
    :arguments:
      - '--file $': ENVIRONMENT_COVFILE
      - -q
      - -w140
  # Per-function coverage report for a single source file (${1}).
  :bullseye_report_covfn:
    :executable: covfn
    :stderr_redirect: :auto
    :arguments:
      - '--file $': ENVIRONMENT_COVFILE
      - --width 120
      - --no-source
      - '"${1}"'
  # Optional GUI coverage browser, launched in the background.
  :bullseye_browser:
    :executable: CoverageBrowser
    :background_exec: :auto
    :optional: TRUE
    :arguments:
      - '"$"': ENVIRONMENT_COVFILE
...

View File

@@ -0,0 +1,194 @@
require 'ceedling/plugin'
require 'ceedling/constants'

# Task-namespace root name and identifiers derived from it.
BULLSEYE_ROOT_NAME = 'bullseye'
BULLSEYE_TASK_ROOT = BULLSEYE_ROOT_NAME + ':'
BULLSEYE_SYM = BULLSEYE_ROOT_NAME.to_sym

# Plugin build tree: compiled output, test results, and dependency files all
# live under <build root>/bullseye; the coverage database goes to artifacts.
BULLSEYE_BUILD_PATH = "#{PROJECT_BUILD_ROOT}/#{BULLSEYE_ROOT_NAME}"
BULLSEYE_BUILD_OUTPUT_PATH = "#{BULLSEYE_BUILD_PATH}/out"
BULLSEYE_RESULTS_PATH = "#{BULLSEYE_BUILD_PATH}/results"
BULLSEYE_DEPENDENCIES_PATH = "#{BULLSEYE_BUILD_PATH}/dependencies"
BULLSEYE_ARTIFACTS_PATH = "#{PROJECT_BUILD_ARTIFACTS_ROOT}/#{BULLSEYE_ROOT_NAME}"

# Vendor sources excluded from per-function coverage reporting.
BULLSEYE_IGNORE_SOURCES = ['unity', 'cmock', 'cexception']
# Ceedling plugin integrating the Bullseye code-coverage tool: instruments
# compilation, collects test results, and reports coverage after the build.
class Bullseye < Plugin

  # Initialize plugin state: the result-file cache, the COVFILE environment
  # entry, the plugin root path, and the ERB template for the summary report.
  def setup
    @result_list = []
    @environment = [ {:covfile => File.join( BULLSEYE_ARTIFACTS_PATH, 'test.cov' )} ]
    @plugin_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
    @coverage_template_all = @ceedling[:file_wrapper].read(File.join(@plugin_root, 'assets/template.erb'))
  end

  # Configuration overrides swapped in (via replace_flattened_config) while a
  # bullseye task runs: redirect build output/results/dependencies into the
  # plugin's build tree and add the CODE_COVERAGE define.
  def config
    {
      :project_test_build_output_path => BULLSEYE_BUILD_OUTPUT_PATH,
      :project_test_results_path => BULLSEYE_RESULTS_PATH,
      :project_test_dependencies_path => BULLSEYE_DEPENDENCIES_PATH,
      :defines_test => DEFINES_TEST + ['CODE_COVERAGE'],
      :collection_defines_test_and_vendor => COLLECTION_DEFINES_TEST_AND_VENDOR + ['CODE_COVERAGE']
    }
  end

  # Compile `source` to `object` with instrumentation: build the normal
  # compiler command line, then wrap it in the covc instrumentation tool and
  # execute the combined command (with pre/post compile plugin hooks).
  def generate_coverage_object_file(source, object)
    arg_hash = {:tool => TOOLS_BULLSEYE_INSTRUMENTATION, :context => BULLSEYE_SYM, :source => source, :object => object}
    @ceedling[:plugin_manager].pre_compile_execute(arg_hash)

    @ceedling[:streaminator].stdout_puts("Compiling #{File.basename(source)} with coverage...")
    compile_command =
      @ceedling[:tool_executor].build_command_line(
        TOOLS_BULLSEYE_COMPILER,
        @ceedling[:flaginator].flag_down( OPERATION_COMPILE_SYM, BULLSEYE_SYM, source ),
        source,
        object,
        @ceedling[:file_path_utils].form_test_build_list_filepath( object ) )

    # The instrumentation tool receives the entire compiler command line as its argument.
    coverage_command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_INSTRUMENTATION, [], compile_command[:line] )
    shell_result = @ceedling[:tool_executor].exec( coverage_command[:line], coverage_command[:options] )

    arg_hash[:shell_result] = shell_result
    @ceedling[:plugin_manager].post_compile_execute(arg_hash)
  end

  # Record each bullseye result file exactly once as test fixtures finish.
  def post_test_fixture_execute(arg_hash)
    result_file = arg_hash[:result_file]

    if ((result_file =~ /#{BULLSEYE_RESULTS_PATH}/) and (not @result_list.include?(result_file)))
      @result_list << arg_hash[:result_file]
    end
  end

  # After the build: report assembled test results, then coverage results --
  # but only when a bullseye task was actually invoked this run.
  def post_build
    return if (not @ceedling[:task_invoker].invoked?(/^#{BULLSEYE_TASK_ROOT}/))

    # test results
    results = @ceedling[:plugin_reportinator].assemble_test_results(@result_list)
    hash = {
      :header => BULLSEYE_ROOT_NAME.upcase,
      :results => results
    }

    @ceedling[:plugin_reportinator].run_test_results_report(hash) do
      message = ''
      message = 'Unit test failures.' if (results[:counts][:failed] > 0)
      message
    end

    # coverage results
    return if (verify_coverage_file() == false)
    if (@ceedling[:task_invoker].invoked?(/^#{BULLSEYE_TASK_ROOT}(all|delta)/))
      # Whole-project covsrc summary for bullseye:all / bullseye:delta...
      command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_REPORT_COVSRC, [])
      shell_result = @ceedling[:tool_executor].exec(command[:line], command[:options])
      report_coverage_results_all(shell_result[:output])
    else
      # ...otherwise per-function reports for just the sources that were tested.
      report_per_function_coverage_results(@ceedling[:test_invoker].sources)
    end
  end

  # `rake summary` support: re-report cached test results from disk plus a
  # whole-project coverage summary.
  def summary
    return if (verify_coverage_file() == false)

    result_list = @ceedling[:file_path_utils].form_pass_results_filelist( BULLSEYE_RESULTS_PATH, COLLECTION_ALL_TESTS )

    # test results
    # get test results for only those tests in our configuration and of those only tests with results on disk
    hash = {
      :header => BULLSEYE_ROOT_NAME.upcase,
      :results => @ceedling[:plugin_reportinator].assemble_test_results(result_list, {:boom => false})
    }

    @ceedling[:plugin_reportinator].run_test_results_report(hash)

    # coverage results
    command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_REPORT_COVSRC)
    shell_result = @ceedling[:tool_executor].exec(command[:line], command[:options])
    report_coverage_results_all(shell_result[:output])
  end

  # Push ('push' then 'on') or pop the Bullseye build-enable state via its
  # command-line tool. Only acts when :auto_license is configured TRUE.
  # NOTE(review): camelCase name is non-idiomatic Ruby but is part of the
  # plugin's interface (called from the rakefile and END block) -- unchanged.
  def enableBullseye(enable)
    if BULLSEYE_AUTO_LICENSE
      if (enable)
        args = ['push', 'on']
        @ceedling[:streaminator].stdout_puts("Enabling Bullseye")
      else
        args = ['pop']
        @ceedling[:streaminator].stdout_puts("Reverting Bullseye to previous state")
      end

      args.each do |arg|
        command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_BUILD_ENABLE_DISABLE, [], arg)
        shell_result = @ceedling[:tool_executor].exec(command[:line], command[:options])
      end
    end
  end

  private ###################################

  # Parse covsrc output and render the coverage template. The first regex
  # (non-greedy) captures the first percentage on the 'Total' line, the second
  # captures the last -- presumably function vs. condition/decision coverage;
  # confirm against covsrc's column layout.
  def report_coverage_results_all(coverage)
    results = {
      :header => BULLSEYE_ROOT_NAME.upcase,
      :coverage => {
        :functions => nil,
        :branches => nil
      }
    }

    if (coverage =~ /^Total.*?=\s+([0-9]+)\%/)
      results[:coverage][:functions] = $1.to_i
    end

    if (coverage =~ /^Total.*=\s+([0-9]+)\%\s*$/)
      results[:coverage][:branches] = $1.to_i
    end

    @ceedling[:plugin_reportinator].run_report($stdout, @coverage_template_all, results)
  end

  # Run covfn for each tested source -- excluding generated mocks and the
  # unity/cmock/cexception vendor sources -- and print its output.
  def report_per_function_coverage_results(sources)
    banner = @ceedling[:plugin_reportinator].generate_banner( "#{BULLSEYE_ROOT_NAME.upcase}: CODE COVERAGE SUMMARY" )
    @ceedling[:streaminator].stdout_puts "\n" + banner

    coverage_sources = sources.clone
    coverage_sources.delete_if {|item| item =~ /#{CMOCK_MOCK_PREFIX}.+#{EXTENSION_SOURCE}$/}
    coverage_sources.delete_if {|item| item =~ /#{BULLSEYE_IGNORE_SOURCES.join('|')}#{EXTENSION_SOURCE}$/}

    coverage_sources.each do |source|
      command = @ceedling[:tool_executor].build_command_line(TOOLS_BULLSEYE_REPORT_COVFN, [], source)
      shell_results = @ceedling[:tool_executor].exec(command[:line], command[:options])
      coverage_results = shell_results[:output].deep_clone
      coverage_results.sub!(/.*\n.*\n/,'') # Remove the Bullseye tool banner

      if (coverage_results =~ /warning cov814: report is empty/)
        # covfn emits cov814 when the source has no recorded coverage data.
        coverage_results = "WARNING: #{source} contains no coverage data!\n\n"
        @ceedling[:streaminator].stdout_puts(coverage_results, Verbosity::COMPLAIN)
      else
        coverage_results += "\n"
        @ceedling[:streaminator].stdout_puts(coverage_results)
      end
    end
  end

  # True when the COVFILE coverage database exists on disk; otherwise prints
  # a banner noting that no coverage data is available and returns false.
  def verify_coverage_file
    exist = @ceedling[:file_wrapper].exist?( ENVIRONMENT_COVFILE )

    if (!exist)
      banner = @ceedling[:plugin_reportinator].generate_banner( "#{BULLSEYE_ROOT_NAME.upcase}: CODE COVERAGE SUMMARY" )
      @ceedling[:streaminator].stdout_puts "\n" + banner + "\nNo coverage file.\n\n"
    end

    return exist
  end

end
# end blocks always executed following rake run
END {
  # cache our input configurations to use in comparison upon next execution
  if (@ceedling[:task_invoker].invoked?(/^#{BULLSEYE_TASK_ROOT}/))
    @ceedling[:cacheinator].cache_test_config( @ceedling[:setupinator].config_hash )
    # Pop the Bullseye enable state pushed when the bullseye task started.
    @ceedling[BULLSEYE_SYM].enableBullseye(false)
  end
}

View File

View File

@@ -0,0 +1,16 @@
require 'ceedling/plugin'
require 'ceedling/streaminator'
require 'ceedling/constants'
# Plugin that routes all of Ceedling's stdout through the colour_reporter
# helper so console output is colourised.
class ColourReport < Plugin

  # Install the colourising writer as Ceedling's stdout override.
  def setup
    colour_writer = ColourReport.method(:colour_stdout)
    @ceedling[:stream_wrapper].stdout_override(&colour_writer)
  end

  # Emit +string+ to the console via colour_reporter's `report` helper.
  # The require is deferred until first output.
  def self.colour_stdout(string)
    require 'colour_reporter.rb'
    report string
  end

end

View File

@@ -0,0 +1,53 @@
ceedling-command-hooks
======================
Plugin for easily calling command line tools at various points in the build process
Define any of these sections in :tools: to provide additional hooks to be called on demand:
```
:pre_mock_generate
:post_mock_generate
:pre_runner_generate
:post_runner_generate
:pre_compile_execute
:post_compile_execute
:pre_link_execute
:post_link_execute
:pre_test_fixture_execute
:pre_test
:post_test
:pre_release
:post_release
:pre_build
:post_build
```
Each of these tools can support an :executable string and an :arguments list, like so:
```
:tools:
:post_link_execute:
:executable: objcopy.exe
:arguments:
- ${1} #This is replaced with the executable name
- output.srec
- --strip-all
```
You may also specify an array of executables to be called in a particular place, like so:
```
:tools:
:post_test:
- :executable: echo
:arguments: "${1} was glorious!"
- :executable: echo
:arguments:
- it kinda made me cry a little.
- you?
```
Please note that it varies which arguments are passed down to the
hooks. For now, see `command_hooks.rb` to figure out which suits you best.
Happy Tweaking!

View File

@@ -0,0 +1,92 @@
require 'ceedling/plugin'
require 'ceedling/constants'
# Plugin that runs user-configured command-line tools at defined points in the
# build (see README). Each hook is an optional :tools: entry; when present it
# may be a single tool hash or an array of tool hashes run in sequence.
class CommandHooks < Plugin

  attr_reader :config

  # Collect the optionally-defined hook tools from the project configuration.
  # Each entry is nil when the corresponding TOOLS_* constant is not defined.
  def setup
    @config = {
      :pre_mock_generate => ((defined? TOOLS_PRE_MOCK_GENERATE) ? TOOLS_PRE_MOCK_GENERATE : nil ),
      :post_mock_generate => ((defined? TOOLS_POST_MOCK_GENERATE) ? TOOLS_POST_MOCK_GENERATE : nil ),
      :pre_runner_generate => ((defined? TOOLS_PRE_RUNNER_GENERATE) ? TOOLS_PRE_RUNNER_GENERATE : nil ),
      :post_runner_generate => ((defined? TOOLS_POST_RUNNER_GENERATE) ? TOOLS_POST_RUNNER_GENERATE : nil ),
      :pre_compile_execute => ((defined? TOOLS_PRE_COMPILE_EXECUTE) ? TOOLS_PRE_COMPILE_EXECUTE : nil ),
      :post_compile_execute => ((defined? TOOLS_POST_COMPILE_EXECUTE) ? TOOLS_POST_COMPILE_EXECUTE : nil ),
      :pre_link_execute => ((defined? TOOLS_PRE_LINK_EXECUTE) ? TOOLS_PRE_LINK_EXECUTE : nil ),
      :post_link_execute => ((defined? TOOLS_POST_LINK_EXECUTE) ? TOOLS_POST_LINK_EXECUTE : nil ),
      :pre_test_fixture_execute => ((defined? TOOLS_PRE_TEST_FIXTURE_EXECUTE) ? TOOLS_PRE_TEST_FIXTURE_EXECUTE : nil ),
      :post_test_fixture_execute => ((defined? TOOLS_POST_TEST_FIXTURE_EXECUTE) ? TOOLS_POST_TEST_FIXTURE_EXECUTE : nil ),
      :pre_test => ((defined? TOOLS_PRE_TEST) ? TOOLS_PRE_TEST : nil ),
      :post_test => ((defined? TOOLS_POST_TEST) ? TOOLS_POST_TEST : nil ),
      :pre_release => ((defined? TOOLS_PRE_RELEASE) ? TOOLS_PRE_RELEASE : nil ),
      :post_release => ((defined? TOOLS_POST_RELEASE) ? TOOLS_POST_RELEASE : nil ),
      :pre_build => ((defined? TOOLS_PRE_BUILD) ? TOOLS_PRE_BUILD : nil ),
      :post_build => ((defined? TOOLS_POST_BUILD) ? TOOLS_POST_BUILD : nil ),
      :post_error => ((defined? TOOLS_POST_ERROR) ? TOOLS_POST_ERROR : nil ),
    }
    @plugin_root = File.expand_path(File.join(File.dirname(__FILE__), '..'))
  end

  # Plugin callbacks: each forwards the relevant file/executable name from the
  # build event to the matching configured hook.
  def pre_mock_generate(arg_hash); run_hook(:pre_mock_generate, arg_hash[:header_file] ); end
  def post_mock_generate(arg_hash); run_hook(:post_mock_generate, arg_hash[:header_file] ); end
  def pre_runner_generate(arg_hash); run_hook(:pre_runner_generate, arg_hash[:source ] ); end
  def post_runner_generate(arg_hash); run_hook(:post_runner_generate, arg_hash[:runner_file] ); end
  def pre_compile_execute(arg_hash); run_hook(:pre_compile_execute, arg_hash[:source_file] ); end
  def post_compile_execute(arg_hash); run_hook(:post_compile_execute, arg_hash[:object_file] ); end
  def pre_link_execute(arg_hash); run_hook(:pre_link_execute, arg_hash[:executable] ); end
  def post_link_execute(arg_hash); run_hook(:post_link_execute, arg_hash[:executable] ); end
  def pre_test_fixture_execute(arg_hash); run_hook(:pre_test_fixture_execute, arg_hash[:executable] ); end
  def post_test_fixture_execute(arg_hash); run_hook(:post_test_fixture_execute, arg_hash[:executable] ); end
  def pre_test(test); run_hook(:pre_test, test ); end
  def post_test(test); run_hook(:post_test, test ); end
  def pre_release; run_hook(:pre_release ); end
  def post_release; run_hook(:post_release ); end
  def pre_build; run_hook(:pre_build ); end
  def post_build; run_hook(:post_build ); end
  def post_error; run_hook(:post_error ); end

  private

  ##
  # Run a single hook tool if it defines an executable.
  #
  # :args:
  #   - hook: Tool hash for the hook to run
  #   - name: Name of file (default: "")
  #
  # :return:
  #    shell_result (nil when the hook has no :executable).
  #
  def run_hook_step(hook, name="")
    if (hook[:executable])
      # Handle argument replacement (${1}), and get the command line to execute.
      cmd = @ceedling[:tool_executor].build_command_line( hook, [], name )
      shell_result = @ceedling[:tool_executor].exec(cmd[:line], cmd[:options])
    end
  end

  ##
  # Run a configured hook if it is available.
  #
  # If the configured hook is an Array, run each entry sequentially;
  # if it is a single tool Hash, run it directly.
  #
  # :args:
  #   - which_hook: Name of the hook to run
  #   - name: Name of file
  #
  def run_hook(which_hook, name="")
    if (@config[which_hook])
      @ceedling[:streaminator].stdout_puts("Running Hook #{which_hook}...", Verbosity::NORMAL)
      if (@config[which_hook].is_a? Array)
        @config[which_hook].each do |hook|
          run_hook_step(hook, name)
        end
      elsif (@config[which_hook].is_a? Hash)
        run_hook_step( @config[which_hook], name )
      else
        # BUGFIX: was Verbosity::COMPLAINT, which is not a Ceedling Verbosity
        # constant (the correct name, used elsewhere in this codebase, is
        # COMPLAIN) and raised NameError on malformed hook configs.
        @ceedling[:streaminator].stdout_puts("Hook #{which_hook} was poorly formed", Verbosity::COMPLAIN)
      end
    end
  end

end

View File

@@ -0,0 +1,250 @@
# A Fake Function Framework Plug-in for Ceedling
This is a plug-in for [Ceedling](https://github.com/ThrowTheSwitch/Ceedling) to use the [Fake Function Framework](https://github.com/meekrosoft/fff) for mocking instead of CMock.
Using fff provides less strict mocking than CMock, and allows for more loosely-coupled tests.
And, when tests fail -- since you get the actual line number of the failure -- it's a lot easier to figure out what went wrong.
## Installing the plug-in
To use the plugin you need to 1) get the contents of this repo and 2) configure your project to use it.
### Get the source
The easiest way to get the source is to just clone this repo into the Ceedling plugin folder for your existing Ceedling project.
(Don't have a Ceedling project already? [Here are instructions to create one.](http://www.electronvector.com/blog/try-embedded-test-driven-development-right-now-with-ceedling))
From within `<your-project>/vendor/ceedling/plugins`, run:
`git clone https://github.com/ElectronVector/fake_function_framework.git`
This will create a new folder named `fake_function_framework` in the plugins folder.
### Enable the plug-in.
The plug-in is enabled from within your project.yml file.
In the `:plugins` configuration, add `fake_function_framework` to the list of enabled plugins:
```yaml
:plugins:
:load_paths:
- vendor/ceedling/plugins
:enabled:
- stdout_pretty_tests_report
- module_generator
- fake_function_framework
```
*Note that you could put the plugin source in some other location.
In that case you'd need to add a new path to the `:load_paths`.*
## How to use it
You use fff with Ceedling the same way you used to use CMock.
Modules can still be generated with the default module generator: `rake module:create[my_module]`.
If you want to "mock" `some_module.h` in your tests, just `#include "mock_some_module.h"`.
This creates a fake function for each of the functions defined in `some_module.h`.
The name of each fake is the original function name with an appended `_fake`.
For example, if we're generating fakes for a stack module with `push` and `pop` functions, we would have the fakes `push_fake` and `pop_fake`.
These fakes are linked into our test executable so that any time our unit under test calls `push` or `pop` our fakes are called instead.
Each of these fakes is actually a structure containing information about how the function was called, and what it might return.
We can use Unity to inspect these fakes in our tests, and verify the interactions of our units.
There is also a global structure named `fff` which we can use to check the sequence of calls.
The fakes can also be configured to return particular values, so you can exercise the unit under test however you want.
The examples below explain how to use fff to test a variety of module interactions.
Each example uses fakes for a "display" module, created from a display.h file with `#include "mock_display.h"`. The `display.h` file must exist and must contain the prototypes for the functions to be faked.
### Test that a function was called once
```c
void
test_whenTheDeviceIsReset_thenTheStatusLedIsTurnedOff()
{
// When
event_deviceReset();
// Then
TEST_ASSERT_EQUAL(1, display_turnOffStatusLed_fake.call_count);
}
```
### Test that a function was NOT called
```c
void
test_whenThePowerReadingIsLessThan5_thenTheStatusLedIsNotTurnedOn(void)
{
// When
event_powerReadingUpdate(4);
// Then
TEST_ASSERT_EQUAL(0, display_turnOnStatusLed_fake.call_count);
}
```
## Test that a single function was called with the correct argument
```c
void
test_whenTheVolumeKnobIsMaxed_thenVolumeDisplayIsSetTo11(void)
{
// When
event_volumeKnobMaxed();
// Then
TEST_ASSERT_EQUAL(1, display_setVolume_fake.call_count);
TEST_ASSERT_EQUAL(11, display_setVolume_fake.arg0_val);
}
```
## Test that calls are made in a particular sequence
```c
void
test_whenTheModeSelectButtonIsPressed_thenTheDisplayModeIsCycled(void)
{
// When
event_modeSelectButtonPressed();
event_modeSelectButtonPressed();
event_modeSelectButtonPressed();
// Then
TEST_ASSERT_EQUAL_PTR((void*)display_setModeToMinimum, fff.call_history[0]);
TEST_ASSERT_EQUAL_PTR((void*)display_setModeToMaximum, fff.call_history[1]);
TEST_ASSERT_EQUAL_PTR((void*)display_setModeToAverage, fff.call_history[2]);
}
```
## Fake a return value from a function
```c
void
test_givenTheDisplayHasAnError_whenTheDeviceIsPoweredOn_thenTheDisplayIsPoweredDown(void)
{
// Given
display_isError_fake.return_val = true;
// When
event_devicePoweredOn();
// Then
TEST_ASSERT_EQUAL(1, display_powerDown_fake.call_count);
}
```
## Fake a function with a value returned by reference
```c
void
test_givenTheUserHasTypedSleep_whenItIsTimeToCheckTheKeyboard_theDisplayIsPoweredDown(void)
{
// Given
char mockedEntry[] = "sleep";
void return_mock_value(char * entry, int length)
{
if (length > strlen(mockedEntry))
{
strncpy(entry, mockedEntry, length);
}
}
display_getKeyboardEntry_fake.custom_fake = return_mock_value;
// When
event_keyboardCheckTimerExpired();
// Then
TEST_ASSERT_EQUAL(1, display_powerDown_fake.call_count);
}
```
## Fake a function with a function pointer parameter
```
void
test_givenNewDataIsAvailable_whenTheDisplayHasUpdated_thenTheEventIsComplete(void)
{
// A mock function for capturing the callback handler function pointer.
void(*registeredCallback)(void) = 0;
void mock_display_updateData(int data, void(*callback)(void))
{
//Save the callback function.
registeredCallback = callback;
}
display_updateData_fake.custom_fake = mock_display_updateData;
// Given
event_newDataAvailable(10);
// When
if (registeredCallback != 0)
{
registeredCallback();
}
// Then
TEST_ASSERT_EQUAL(true, eventProcessor_isLastEventComplete());
}
```
## Helper macros
For convenience, there are also some helper macros that create new Unity-style asserts:
- `TEST_ASSERT_CALLED(function)`: Asserts that a function was called once.
- `TEST_ASSERT_NOT_CALLED(function)`: Asserts that a function was never called.
- `TEST_ASSERT_CALLED_TIMES(times, function)`: Asserts that a function was called a particular number of times.
- `TEST_ASSERT_CALLED_IN_ORDER(order, function)`: Asserts that a function was called in a particular order.
Here's how you might use one of these instead of simply checking the call_count value:
```c
void
test_whenTheDeviceIsReset_thenTheStatusLedIsTurnedOff()
{
// When
event_deviceReset();
// Then
// This how to directly use fff...
TEST_ASSERT_EQUAL(1, display_turnOffStatusLed_fake.call_count);
// ...and this is how to use the helper macro.
TEST_ASSERT_CALLED(display_turnOffStatusLed);
}
```
## Test setup
All of the fake functions, and any fff global state are all reset automatically between each test.
## CMock configuration
You can still use some of the CMock configuration options for setting things like the mock prefix, and for including additional header files in the mock files.
```yaml
:cmock:
:mock_prefix: mock_
:includes:
-
:includes_h_pre_orig_header:
-
:includes_h_post_orig_header:
-
:includes_c_pre_header:
-
:includes_c_post_header:
```
## Running the tests
There are unit and integration tests for the plug-in itself.
These are run with the default `rake` task.
The integration test runs the tests for the example project in examples/fff_example.
For the integration tests to succeed, this repository must be placed in a Ceedling tree in the plugins folder.
## More examples
There is an example project in examples/fff_example.
It shows how to use the plug-in with some full-size examples.

Some files were not shown because too many files have changed in this diff Show More