What is the accuracy limitation in C?

C's floating-point types (float and double) store numbers in binary, so most decimal fractions, including 0.1, cannot be represented exactly. Each value is rounded to the nearest representable number, and these small rounding errors can accumulate over many operations, as the following program shows.

#include <stdio.h>
int main(void)
{
    double d = 0;
    int i;

    for (i = 0; i < 10000; i++) {
        d = d + 0.1; // the stored value of 0.1 is only the nearest representable double, so each addition adds a tiny rounding error
    }

    printf("d is %.12f\n", d); // result is 1000.000000000159 rather than 1000 due to inaccuracies

    return 0;
}
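
To see where the error comes from, you can print the value that actually gets stored for 0.1. A minimal sketch, assuming a typical IEEE 754 double (the exact digits may differ slightly between compilers and platforms):

#include <stdio.h>
int main(void)
{
    // The literal 0.1 is converted to the nearest representable double,
    // which is very slightly larger than one tenth.
    printf("0.1 is stored as %.20f\n", 0.1);       // 0.10000000000000000555...

    // Multiplying once instead of adding 10000 times involves only a single
    // rounding step, so the result lands much closer to 1000.
    printf("0.1 * 10000 = %.12f\n", 0.1 * 10000);  // 1000.000000000000

    return 0;
}

This is also why floating-point results are rarely compared for exact equality; comparisons are usually made against a small tolerance instead.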