Package a – JavaScript mocking framework

Adlan Elmurzajev and I have just published https://npmjs.org/package/a – a JavaScript mocking framework for Node.js. It’s called a and is installed with npm. In addition to regular mocking, it can also stub require(), which means you can do proper unit testing by isolating dependencies. The mocking framework can be used with any JavaScript testing framework.

(The package also includes a unit test framework in BDD style – it is not fully documented yet, but you can find examples in this repo: https://bitbucket.org/pure/a_demo. When the test framework is fully documented, I will write a blog post about it.)

Key features

  • partial mock (fallback)
  • strict mock
  • mocking require()
  • object mock
  • multiple expects / returns
  • array expects
  • repeats: nTimes or infinite
  • verify
  • interceptors (whenCalled)

Mocking a function

partial mock

var original = function() {
    return 'realValue';
}

var mock = require('a').mock(original);
original = mock;
mock.expect().return('fake');

original(); //returns 'fake'
original(); //returns 'realValue'

strict mock

var original = function() {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect().return('fake');

original(); //returns 'fake'
original(); //throws unexpected arguments

strict mock with arguments

var original = function(arg) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('testValue1').return('fake1');
mock.expect('testValue2').return('fake2');

original('testValue1'); //returns 'fake1'
original('testValue2'); //returns 'fake2'
original(); //throws unexpected arguments
original('foo'); //throws unexpected arguments

strict mock with multiple arguments

var original = function(arg1, arg2) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('firstArg1', 'secondArg1').return('fake1');
mock.expect('firstArg2', 'secondArg2').return('fake2');

original('firstArg1', 'secondArg1'); //returns 'fake1'
original('firstArg2', 'secondArg2'); //returns 'fake2'
original('foo'); //throws unexpected arguments
original('foo', 'bar'); //throws unexpected arguments

strict mock expecting array

var original = function(array) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expectArray(['a','b']).return('fake1');
mock.expectArray(['a','b']).return('fake2');
mock.expectArray(['c','d']).return('fake3');

original(['a','b']); //returns 'fake1'
original(['a','b']); //returns 'fake2'
original(['c','d']); //returns 'fake3'
original(['a','b']); //throws unexpected arguments
original(['foo', 'bar']); //throws unexpected arguments

strict mock with repeats

var original = function() {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect().return('fake').repeat(2);

original(); //returns 'fake'
original(); //returns 'fake'
original(); //throws unexpected arguments

strict mock with infinite repeats

var original = function() {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect().return('fake').repeatAny();

original(); //returns 'fake'
original(); //returns 'fake'
original(); //returns 'fake'...

strict mock ignoring arguments

var original = function(arg) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expectAnything().return('fake1');

original('someRandomValue'); //returns 'fake1'
original(); //throws unexpected arguments

strict mock with interceptor

var original = function(arg) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('testValue').whenCalled(onCalled).return('fake1');

function onCalled(arg) {
    //arg == 'testValue'
}

original('testValue'); //returns 'fake1'
original(); //throws unexpected arguments

strict mock – verify (fail)

var original = function(arg) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('testValue1').return('fake1');
mock.expect('testValue2').return('fake2');

original('testValue1'); //returns 'fake1'
mock.verify(); //throws mock has 1 pending functions

strict mock – verify (success)

var original = function(arg) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('testValue1').return('fake1');
mock.expect('testValue2').return('fake2');

original('testValue1'); //returns 'fake1'
original('testValue2'); //returns 'fake2'
mock.verify(); //returns true

strict mock – advanced scenario

var original = function(arg, callback) {
    return 'realValue';
}

var mock = require('a').mock();
original = mock;
mock.expect('testValue').expectAnything().whenCalled(onCalled).return('fake1');

function onCalled(arg,callback) {
    //arg == 'testValue'
    //callback == foo
}

function foo() {    
}

original('testValue', foo); //returns 'fake1'
mock.verify(); //returns true
original('testValue',foo); //throws unexpected arguments

Mocking require

expectRequire

var fakeDep = {};

var expectRequire = require('a').expectRequire;
expectRequire('./realDep').return(fakeDep);

require('./realDep'); //returns fakeDep
require('./realDep'); //returns realDep (behaves like a partial mock)

requireMock (compact syntax)

var requireMock = require('a').requireMock;
var fakeDep = requireMock('./realDep'); //returns a strict mock

require('./realDep'); //returns fakeDep
require('./realDep'); //returns realDep

…is equivalent to…

var mock = require('a').mock();
var expectRequire = require('a').expectRequire;

var fakeDep = mock; 
expectRequire('./realDep').return(fakeDep);

require('./realDep'); //returns fakeDep
require('./realDep'); //returns realDep
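
Putting it together, stubbing require() is what makes isolated unit tests possible. Here is a minimal sketch of such a test (the file names sut.js and realDep.js, the lookupName method and the use of Node’s built-in assert are only illustrative, and it assumes the test and sut.js resolve './realDep' to the same path):

// sut.js – the module under test, depending on ./realDep
var realDep = require('./realDep');
module.exports.getGreeting = function (name) {
    return 'Hello ' + realDep.lookupName(name);
};

// test.js – set up the fake before requiring the module under test
var assert = require('assert');
var a = require('a');

var lookupName = a.mock();
lookupName.expect('world').return('WORLD');
a.expectRequire('./realDep').return({ lookupName: lookupName });

var sut = require('./sut'); //its internal require('./realDep') now returns the fake
assert.equal(sut.getGreeting('world'), 'Hello WORLD'); //passes
assert.ok(lookupName.verify()); //all expectations were met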

Mocking an object

partial object mock

function newCustomer(_name) {
    var c = {};

    c.getName = function () 
    {
        return _name;
    };

    return c;
}

var customer = newCustomer('Alfonzo The Real');
var customerMock = require('a').mock(customer);

customerMock.getName.expect().return('Johnny Fake');

customer.getName(); //returns 'Johnny Fake'
customer.getName(); //returns 'Alfonzo The Real'
customerMock.verify(); //returns true

10 Reasons why you SHOULD write your own DAL

Intro

Usually you see posts with the opposite headline: 10 reasons why you should not create your own DAL / ORM. There are certainly lots of legitimate reasons why you should not, but in some cases there are very strong reasons for writing your own Data Access Layer / Object-Relational Mapper. We just did, at Timpex, and we are very proud of the result. Most of the time I was the only developer on the project. Pair programming was not possible at that point because of physical obstacles – I was isolated at a new department office in another city. In the later phase another coworker, Adlan Elmurzajev, joined the project, and we placed the final bricks. The project was initiated by me and the leader of the development team, and I was given the time and resources by management to implement the DAL.

I needed to change my coding strategy after three weeks. The idea was to start out with some basic SQL with joins, then write some integration tests, and for the pure composition of SQL I wanted to go test-first. However, as things went on, the code got complex, and the iterations between new integration tests took longer and longer. I decided to change strategy – I threw all the existing code “out of the window” and started over again, this time entirely with TDD. I realized that integration tests did not give much value at that point, because they didn’t drive the design. I felt a lot more comfortable with tests first, and the design evolved quite nicely. I did have to get used to the idea that I wasn’t going to touch the database for a long, long time – actually not for two months. However, this felt like a relief rather than a frustration. But enough intro-talk for now, here are my 10 reasons:

1. Because you are dealing with a legacy database. In our case we had to work against a legacy database with some structures from 1987. The database technology itself was not legacy, but the actual database schema was. It had no physical relations, only logical (programmatic) relations. Some relations were a bit odd and impossible to represent (without hacks) in a third-party ORM like NHibernate or Entity Framework. Db-null was represented by zeros in integer columns and by String.Empty in string columns. Datetime columns were not present; instead, date and time were represented by integers in different formats: YYYYMMDD, HHmmSS or just HHmm. Values that represented both a day and a time of day were composed of two integer columns: YYYYMMDD + HHmmSS (see the first sketch after the list). In some cases the same table was used for different concepts, but with no discriminator column. E.g. we had an Order table that was used for both ‘Order’ and ‘Order Template’ – they were separated by two OrderNo ranges: regular orders in range xxxx..yyyy and template orders in range yyyy..zzzz – we called this a Formula Discriminator. The ranges themselves were kept in a Range table. So why didn’t we just upgrade the customer databases with migration scripts? Well, first of all we have a lot of customers running – at least 200. Secondly, the major part of the program is not written in C#; it is written in a 4GL language – and unfortunately with no single point of entity creation, so db inserts and updates are scattered and duplicated in the 4GL code. We have a long-term strategy to port / rewrite all of our code in C#, but this is not going to happen overnight. Until then, C#, the 4GL and the legacy database must happily live together.

2. Because you don’t want to spend days searching for answers on forums (like hibernate.org or the ADO.NET Entity Framework forums).

3. Because you don’t want to post topics (at nhibernate.org) that no one is going to answer anyway.

4. Because you don’t want to buy or spend time with a profiler. Lately, profilers have gained a lot of popularity. Developers need profilers to find out why their ORM is so slow / chatty against the database. This is usually caused by the lack of a fetching strategy and relying on lazy loading instead. In our custom DAL, we have an option to eagerly load entities to any level. Let’s say you have an order with order lines, order lines with packages and packages with items. If your service knows it is going to need all levels down to Item to do its computation, it should tell the FetchingService to do so: FetchingService.IncludeOrderLines().IncludePackage().IncludeItems() (see the second sketch after the list). This feature is often poorly implemented in third-party ORMs – e.g. NHibernate offers a poor fetching strategy. Entity Framework does have quite a good fetching strategy, by the way.

5. Because you want to give better estimates. When I give estimates to the project team leader, I find the riskiest part to be the ORM part. This has always been a pain in the ass, and the ORM seems to be what breaks a tight schedule every time.

6. Because you don’t want to do mapping with a designer. Using a designer (e.g. Entity Framework) is code generation – which is one of the seven blind alleys in software design. It works very badly with code migrations and a legacy database with no physical relations. It also works badly with a code-first (TDD) approach.

7. Because you don’t want to map with XML. We are programmers and we can code. I am sure lots of you have been messing around with NHibernate mapping files. And yes, I know NHibernate supports a fluent syntax. The problem is, the fluent syntax is poorly documented and there is no one-to-one correspondence between XML mapping and fluent mapping.

8. Because you want to apply Dependency Injection to entities. DI in entities is generally not recommended when working with an ORM. For example, if you want to apply DI with NHibernate you would need to do some AOP (aspect-oriented programming) to “interrupt” the ORM code and run some build-up mechanisms with the container (IoC) – both when retrieving from the database and when creating new entities. In my eyes, AOP is just a hack to avoid a bigger hack. The recommended approach is to let the caller pass the dependent classes to the entity. In my opinion, this is just another ugly workaround that breaks encapsulation / the facade principle and the single responsibility principle.

9. Because you write an understandable API – not a framework. Since you write your DAL with TDD, you write no more code than you need. And you know exactly how your code behaves. You don’t write a general-purpose framework with lots of overloads and options. This means code changes and mapping new concepts will be easy. And you don’t need to mess with the DLL version conflicts you often get when relying on SomeOrm.dll that depends on someotherFramework.dll.

10. Because you will learn a lot. When writing a custom DAL you will get to know transactions, locking strategies, the unit-of-work concept, hash codes, equality and caching. These are concepts you ought to know anyhow. And most important of all: you will become better at TDD – because writing a DAL is indeed complex, and TDD is the only way to go when implementing a complex system.
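
To make reasons 1 and 4 a bit more concrete, here are two tiny JavaScript sketches (illustrative only – our DAL is not written in JavaScript, and every name below is invented). First, decoding the integer-encoded date/time columns from reason 1:

//decodes dateInt = YYYYMMDD and timeInt = HHmmSS; zero means db-null by the legacy convention
function decodeLegacyDateTime(dateInt, timeInt) {
    if (!dateInt) return null;

    var year = Math.floor(dateInt / 10000);
    var month = Math.floor((dateInt % 10000) / 100) - 1; //JavaScript months are zero-based
    var day = dateInt % 100;

    var hours = Math.floor(timeInt / 10000);
    var minutes = Math.floor((timeInt % 10000) / 100);
    var seconds = timeInt % 100;

    return new Date(year, month, day, hours, minutes, seconds);
}

decodeLegacyDateTime(19870224, 133000); //Tue Feb 24 1987 13:30:00
decodeLegacyDateTime(0, 0); //null ("db-null")

Second, the shape of the fluent fetching strategy from reason 4 – each Include call simply records a path to load eagerly and returns the service for chaining:

function newFetchingService() {
    var includePaths = [];
    var service = {
        includeOrderLines: function () { includePaths.push('Order.OrderLines'); return service; },
        includePackage: function () { includePaths.push('OrderLine.Package'); return service; },
        includeItems: function () { includePaths.push('Package.Items'); return service; },
        paths: function () { return includePaths.slice(); }
    };
    return service;
}

newFetchingService().includeOrderLines().includePackage().includeItems().paths();
//['Order.OrderLines', 'OrderLine.Package', 'Package.Items']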

Happy Easter, everybody!


Tennis Kata with state transition tree

I have just published my specs of the Tennis Kata on GitHub. I am using our in-house infrastructure for unit testing. This means separating the Act from the Assert by having two different classes. We also use inheritance in a very simple given-then tree. This lets us explore different permutations of method calls, and after each method call we can do assertions. It is a kind of storytelling, but without all the noise that acceptance testing frameworks usually have. We start with a Construct_Act that creates the class under test. Then we create a When_Constructed class that inherits from the Act – this is where the asserts live. Further on we make a PlayerOneWinBall_Act that inherits from Construct_Act, and then a When_PlayerOneWinBall : PlayerOneWinBall_Act class that observes and asserts. This gives a nice given-then tree with namespaces as the Given statement. It certainly is a lot of classes, but each class is dead simple and we are not violating the Single Responsibility Principle for the test class itself. Each ‘Act’ and ‘When’ class has only one context, with high cohesion. Another pro is that we only construct the class under test once – so changing the constructor does not lead to a massive refactoring of the test code. For readability I have also created some empty Act classes (e.g. At15_0_Act). We really don’t need them, but it is easier to read ‘class PlayerTwoWinBall_Act : At15_0_Act’ than ‘class PlayerTwoWinBall_Act : PlayerOneWinBall_Act’.

Because stupid WordPress doesn’t allow plugins, I published a typical Act class at pastie.org.
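
Roughly, the tree has this shape (a loose JavaScript sketch of the same idea – the real specs use our in-house infrastructure, so the game stub and all names below are only approximations):

var assert = require('assert');

//a stub of the class under test, just enough to make the sketch run
function newTennisGame() {
    var points = ['Love', 'Fifteen', 'Thirty', 'Forty'];
    var p1 = 0, p2 = 0;
    return {
        playerOneWinBall: function () { p1++; },
        playerTwoWinBall: function () { p2++; },
        score: function () { return points[p1] + '-' + points[p2]; }
    };
}

//the Act classes build up the Given by chaining constructors;
//the class under test is only constructed once, in the root Act
function Construct_Act() {
    this.game = newTennisGame();
}

function When_Constructed() {
    Construct_Act.call(this);
    assert.equal(this.game.score(), 'Love-Love'); //the Then: observe and assert
}

function PlayerOneWinBall_Act() {
    Construct_Act.call(this);
    this.game.playerOneWinBall();
}

function When_PlayerOneWinBall() {
    PlayerOneWinBall_Act.call(this);
    assert.equal(this.game.score(), 'Fifteen-Love');
}

new When_Constructed();
new When_PlayerOneWinBall();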

The given-then tree – after all, it is all about state transitions:


Presenter meets Visitor

Here are the slides from my presentation at NNUG on 30.09.2010 (first slide in Norwegian, the rest in English). It is based on a real-world experience of presenting heterogeneous data in a grid. The example walkthrough uses GoF patterns and the Single Responsibility Principle to improve the code quality. It starts out with a naive approach and does stepwise refactorings. We get to know the Presenter and the Visitor pattern – which take full advantage of OO and static languages. The combination of Presenter and Visitor is indeed powerful.
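
The walkthrough in the slides uses a static language, but the core idea can be sketched in a few lines of JavaScript (all row types and data below are invented for illustration): each heterogeneous row accepts a visitor, and the presenter is a visitor that turns each concrete type into grid-friendly text.

//each heterogeneous row type knows how to accept a visitor (double dispatch)
function OrderRow(orderNo) { this.orderNo = orderNo; }
OrderRow.prototype.accept = function (visitor) { return visitor.visitOrder(this); };

function InvoiceRow(amount) { this.amount = amount; }
InvoiceRow.prototype.accept = function (visitor) { return visitor.visitInvoice(this); };

//the presenter is a visitor that maps each concrete type to display text
var gridPresenter = {
    visitOrder: function (row) { return 'Order #' + row.orderNo; },
    visitInvoice: function (row) { return 'Invoice, ' + row.amount + ' NOK'; }
};

var rows = [new OrderRow(42), new InvoiceRow(1250)];
rows.map(function (row) { return row.accept(gridPresenter); });
//['Order #42', 'Invoice, 1250 NOK']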


New viewing on 16 November

For those who missed the viewing this past weekend, there will be a new viewing on Tuesday 16 November, 15:00–16:00.

Finn-kode: 25421438

Finn-link: http://www.finn.no/finn/realestate/homes/object?finnkode=25421438


We are considering selling the house in Åndalsnes

The house is located at Romsdalsveien 79. Built in 1987. Well maintained, sunny location, great view and a sheltered garden. If interested, contact us by e-mail at lroal@hotmail.com or by phone at 977 64 916. There will be a viewing today, 9 November, at 19:30. Otherwise, viewings by appointment.
