Branch: Tag:

2001-03-17

2001-03-17 14:44:29 by Mirar (Pontus Hagland) <pike@sort.mirar.org>

Image.Layer modes modified (not backwards compatible!)
- presumably fixes [bug 1167 (#1167)] and [bug 1187 (#1187)].

Alpha channel handling for modes add, subtract, multiply, divide,
modulo, invsubtract, invdivide, invmodulo, difference, max, min,
bitwise_and, bitwise_or, bitwise_xor, screen and overlay modified so
that the source layer alpha channel is copied to the destination layer
alpha channel rather than mixed with the current layer alpha channel:
aD=aS
rather than the behavior before:
aD=(aL+(1-aL)*aS)
(which is the behavior of the 'normal' mode.)

Bug fixes in the above modes so that the alpha r,g,b channels
are handled properly (separately).

Bug fixes in equal, not_equal, less, more, less_or_equal,
more_or_equal modes so that the alpha channel is handled as
documented (which is, in fact, as above; aD=aS).

Bug fix in divide, invdivide to actually do what the modes
are supposed to do (rather than just blacking the layer).

And while on it, adding some modes:

'imultiply' - D=(1-L)*S
'idivide' - D=S/(1-L)
'invidivide' - D=L/(1-S)

'value_mul' - multiplying the source layer value
(as in h,s,v) with the current layer value to the destination layer
("true burn", I guess).

Rev: src/modules/Image/layer_channel.h:1.5
Rev: src/modules/Image/layer_oper.h:1.8
Rev: src/modules/Image/layers.c:1.63

4:    rgb_group *sa,rgb_group *la,rgb_group *da,    int len,double alpha)   { +  union { COLORTYPE r,g,b; } tz,*t=&tz; +    #ifndef L_LOGIC    if (alpha==0.0)    {
15: Inside #if undefined(L_LOGIC)
   }    else if (alpha==1.0)    { + #ifdef L_COPY_ALPHA +  MEMCPY(da,sa,sizeof(rgb_group)*len); + #define da da da da /* error */ + #endif    if (!la) /* no layer alpha => full opaque */    {   #ifdef L_MMX_OPER
81: Inside #if undefined(L_LOGIC)
   d->r=L_TRUNC(L_OPER(s->r,l->r));    d->g=L_TRUNC(L_OPER(s->g,l->g));    d->b=L_TRUNC(L_OPER(s->b,l->b)); -  *da=white; -  l++; s++; sa++; da++; d++; + #ifndef L_COPY_ALPHA +  *da=white; da++; + #endif +  l++; s++; sa++; d++;    }    }    else
93: Inside #if undefined(L_LOGIC)
   d->r=L_TRUNC(L_OPER(s->r,l->r));    d->g=L_TRUNC(L_OPER(s->g,l->g));    d->b=L_TRUNC(L_OPER(s->b,l->b)); + #ifndef L_COPY_ALPHA   #ifdef L_USE_SA    *da=*sa;   #else    *da=white;   #endif -  + #endif    }    else if (la->r==0 && la->g==0 && la->b==0)    {    *d=*s; -  + #ifndef L_COPY_ALPHA    *da=*sa; -  + #endif    }    else    { -  d->r=L_TRUNC(L_OPER(s->r,l->r)); -  ALPHA_ADD(s,d,d,sa,la,da,r); -  d->g=L_TRUNC(L_OPER(s->g,l->g)); -  ALPHA_ADD(s,d,d,sa,la,da,g); -  d->b=L_TRUNC(L_OPER(s->b,l->b)); -  ALPHA_ADD(s,d,d,sa,la,da,b); + #ifndef L_COPY_ALPHA +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  ALPHA_ADD(s,t,d,sa,la,da,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD(s,t,d,sa,la,da,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD(s,t,d,sa,la,da,b);   #ifdef L_USE_SA    *da=*sa;   #endif -  + #else +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  ALPHA_ADD_nA(s,t,d,sa,la,da,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD_nA(s,t,d,sa,la,da,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD_nA(s,t,d,sa,la,da,b); + #endif    } -  l++; s++; la++; sa++; da++; d++; +  l++; s++; la++; sa++; d++; + #ifndef L_COPY_ALPHA +  da++; + #endif    }    }    else    { -  + #ifdef L_COPY_ALPHA + #undef da +  MEMCPY(da,sa,sizeof(rgb_group)*len); + #define da da da + #endif    if (!la) /* no layer alpha => full opaque */    while (len--)    { -  d->r=L_TRUNC(L_OPER(s->r,l->r)); -  ALPHA_ADD_V_NOLA(s,d,d,sa,da,alpha,r); -  d->g=L_TRUNC(L_OPER(s->g,l->g)); -  ALPHA_ADD_V_NOLA(s,d,d,sa,da,alpha,g); -  d->b=L_TRUNC(L_OPER(s->b,l->b)); -  ALPHA_ADD_V_NOLA(s,d,d,sa,da,alpha,b); + #ifndef L_COPY_ALPHA +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  ALPHA_ADD_V_NOLA(s,t,d,sa,da,alpha,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD_V_NOLA(s,t,d,sa,da,alpha,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD_V_NOLA(s,t,d,sa,da,alpha,b);   #ifdef L_USE_SA    *da=*sa;   #endif -  l++; s++; sa++; da++; d++; +  da++; + #else +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  
ALPHA_ADD_V_NOLA_nA(s,t,d,sa,da,alpha,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD_V_NOLA_nA(s,t,d,sa,da,alpha,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD_V_NOLA_nA(s,t,d,sa,da,alpha,b); +  + #endif +  l++; s++; sa++; d++;    }    else    while (len--)    { -  d->r=L_TRUNC(L_OPER(s->r,l->r)); -  ALPHA_ADD_V(s,d,d,sa,la,da,alpha,r); -  d->g=L_TRUNC(L_OPER(s->g,l->g)); -  ALPHA_ADD_V(s,d,d,sa,la,da,alpha,g); -  d->b=L_TRUNC(L_OPER(s->b,l->b)); -  ALPHA_ADD_V(s,d,d,sa,la,da,alpha,b); + #ifndef L_COPY_ALPHA +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  ALPHA_ADD_V(s,t,d,sa,la,da,alpha,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD_V(s,t,d,sa,la,da,alpha,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD_V(s,t,d,sa,la,da,alpha,b);   #ifdef L_USE_SA    *da=*sa;   #endif -  l++; s++; la++; sa++; da++; d++; +  da++; + #else +  t->r=L_TRUNC(L_OPER(s->r,l->r)); +  ALPHA_ADD_V_nA(s,t,d,sa,la,da,alpha,r); +  t->g=L_TRUNC(L_OPER(s->g,l->g)); +  ALPHA_ADD_V_nA(s,t,d,sa,la,da,alpha,g); +  t->b=L_TRUNC(L_OPER(s->b,l->b)); +  ALPHA_ADD_V_nA(s,t,d,sa,la,da,alpha,b); + #undef da + #endif +  l++; s++; la++; sa++; d++;    }    }   #else /* L_LOGIC */
165:    *da=*d=L_LOGIC(L_OPER(s->r,l->r),    L_OPER(s->g,l->g),    L_OPER(s->b,l->b)); -  l++; s++; sa++; da++; d++; +  l++; s++; sa++; d++; da++;    }    else    while (len--)    {    if (la->r==0 && la->g==0 && la->b==0) -  *d=*da=L_TRANS; +  *da=*d=L_TRANS;    else    *da=*d=L_LOGIC(L_OPER(s->r,l->r),    L_OPER(s->g,l->g),    L_OPER(s->b,l->b)); -  l++; s++; la++; sa++; da++; d++; +  l++; s++; la++; sa++; d++; da++;    }    }   #endif /* L_LOGIC */   }