 * For subclasses where the pixels[] buffer isn't set by default, this should
 * copy all data into the pixels[] array
 *
 * @webref pimage:pixels
 * @webBrief Loads the pixel data for the image into its pixels[] array
 * @usage web_application
 */
public void loadPixels() {  // ignore
  // (Re)allocate only when the buffer is missing or its size is stale,
  // so repeated calls stay cheap.
  if (pixels == null || pixels.length != pixelWidth*pixelHeight) {
    pixels = new int[pixelWidth*pixelHeight];
  }
  setLoaded();
}


/**
 * Updates the display window with the data in the pixels[] array. Use in
 * conjunction with loadPixels(). If you're only reading pixels from the
 * array, there's no need to call updatePixels() — updating is only
 * necessary to apply changes.
 */
public void updatePixels() {  // ignore
  // Mark the whole image as modified.
  updatePixels(0, 0, pixelWidth, pixelHeight);
}


/**
 * If the image is in RGB format (i.e. on a PVideo object),
 * the value will get its high bits set, just to avoid cases where
 * they haven't been set already.
 *
 * If the image is in ALPHA format, this returns a white with its
 * alpha value set.
 *
 * This function is included primarily for beginners. It is quite
 * slow because it has to check to see if the x, y that was provided
 * is inside the bounds, and then has to check to see what image
 * type it is. If you want things to be more efficient, access the
 * pixels[] array directly.
 *
 * @webref image:pixels
 * @webBrief Reads the color of any pixel or grabs a rectangle of pixels
 * @usage web_application
 * @param x x-coordinate of the pixel
 * @param y y-coordinate of the pixel
 * @see PApplet#set(int, int, int)
 * @see PApplet#pixels
 * @see PApplet#copy(PImage, int, int, int, int, int, int, int, int)
 */
public int get(int x, int y) {
  // Out-of-bounds reads yield transparent black rather than throwing.
  if (x < 0 || y < 0 || x >= pixelWidth || y >= pixelHeight) {
    return 0;
  }
  int value = pixels[y * pixelWidth + x];
  if (format == RGB) {
    // force the alpha bits on, in case they were never set
    return value | 0xff000000;
  }
  if (format == ARGB) {
    return value;
  }
  if (format == ALPHA) {
    // stored entry is a bare 0..255 alpha; present it as white + alpha
    return (value << 24) | 0xffffff;
  }
  return 0;  // unknown format
}
/**
 * Grabs a rectangle of pixels as a new PImage. The returned image always
 * has the originally requested size; regions that fall outside this image
 * are left untouched (transparent) in the result.
 *
 * @param w width of pixel rectangle to get
 * @param h height of pixel rectangle to get
 */
public PImage get(int x, int y, int w, int h) {
  // Offset inside the destination image; non-zero only when the request
  // hangs off the left/top edge.
  int destX = 0;
  int destY = 0;
  // The result keeps the originally requested dimensions.
  final int requestedW = w;
  final int requestedH = h;
  boolean clipped = false;

  if (x < 0) {  // clip against the left edge
    w += x;  // x is negative, so this shrinks the width
    destX = -x;
    clipped = true;
    x = 0;
  }
  if (y < 0) {  // clip against the top edge
    h += y;  // y is negative, so this drops rows
    destY = -y;
    clipped = true;
    y = 0;
  }
  if (x + w > pixelWidth) {  // clip against the right edge
    w = pixelWidth - x;
    clipped = true;
  }
  if (y + h > pixelHeight) {  // clip against the bottom edge
    h = pixelHeight - y;
    clipped = true;
  }
  if (w < 0) {
    w = 0;
  }
  if (h < 0) {
    h = 0;
  }

  // A clipped RGB grab gains uncovered (transparent) regions, so the
  // result must carry an alpha channel.
  int outFormat = (clipped && format == RGB) ? ARGB : format;

  PImage target = new PImage(requestedW / pixelDensity,
                             requestedH / pixelDensity,
                             outFormat, pixelDensity);
  target.parent = parent;  // parent may be null so can't use createImage()
  if (w > 0 && h > 0) {
    getImpl(x, y, w, h, target, destX, destY);
  }
  return target;
}
/**
 * Returns a copy of this PImage. Equivalent to get(0, 0, width, height).
 * Deprecated, just use copy() instead.
 */
public PImage get() {
  // Formerly this used clone(), which caused memory problems.
  // https://github.com/processing/processing/issues/81
  // Note: uses the full pixel (not display) dimensions.
  return get(0, 0, pixelWidth, pixelHeight);
}
/**
 * Returns a full copy of this image, using the full pixel
 * (not display) dimensions. Preferred over the deprecated get().
 */
public PImage copy() {
  return get(0, 0, pixelWidth, pixelHeight);
}
/**
 * Internal function to actually handle getting a block of pixels that
 * has already been properly cropped to a valid region. That is, x/y/w/h
 * are guaranteed to be inside the image space, so the implementation can
 * use the fastest possible pixel copying method.
 */
protected void getImpl(int sourceX, int sourceY,
                       int sourceWidth, int sourceHeight,
                       PImage target, int targetX, int targetY) {
  // Bulk-copy one row at a time; both offsets advance by a full row
  // of their respective images per iteration.
  int srcOffset = sourceY * pixelWidth + sourceX;
  int dstOffset = targetY * target.pixelWidth + targetX;
  for (int remaining = sourceHeight; remaining > 0; remaining--) {
    System.arraycopy(pixels, srcOffset, target.pixels, dstOffset, sourceWidth);
    srcOffset += pixelWidth;
    dstOffset += target.pixelWidth;
  }
}
/**
 *
 * Changes the color of any pixel or writes an image directly into the
 * display window.
 *
 * The x and y parameters specify the pixel to change and the
 * color parameter specifies the color value. The color parameter is
 * affected by the current color mode (the default is RGB values from 0 to
 * 255). When setting an image, the x and y parameters define
 * the coordinates for the upper-left corner of the image, regardless of
 * the current imageMode().
 *
 * Setting the color of a single pixel with set(x, y) is easy, but
 * not as fast as putting the data directly into pixels[]. The
 * equivalent statement to set(x, y, #000000) using pixels[]
 * is pixels[y*width+x] = #000000. See the reference for
 * pixels[] for more information.
 *
 * @webref image:pixels
 * @webBrief Writes a color to any pixel or writes an image into another
 * @usage web_application
 * @param x x-coordinate of the pixel
 * @param y y-coordinate of the pixel
 * @param c any value of the color datatype
 * @see PImage#get(int, int, int, int)
 * @see PImage#pixels
 * @see PImage#copy(PImage, int, int, int, int, int, int, int, int)
 */
public void set(int x, int y, int c) {
  // Out-of-bounds writes are silently ignored, mirroring get().
  if ((x < 0) || (y < 0) || (x >= pixelWidth) || (y >= pixelHeight)) return;
  switch (format) {
    case RGB -> pixels[y * pixelWidth + x] = 0xff000000 | c;
    case ARGB -> pixels[y * pixelWidth + x] = c;
    // ALPHA images store a bare 0..255 value per entry: get() shifts the
    // entry up by 24 bits, filter(GRAY) reads it as 255-pixels[i], and
    // blurAlpha() masks with BLUE_MASK. The previous code stored a packed
    // ARGB value (((c & 0xff) << 24) | 0xffffff) -- i.e. the value get()
    // should RETURN, not the value that should be STORED -- which broke
    // every subsequent read. Store only the low byte (blue channel, the
    // same "blue as alpha" convention mask() documents).
    case ALPHA -> pixels[y * pixelWidth + x] = c & 0xff;
  }
  updatePixels(x, y, 1, 1); // slow...
}
/**
 *
 * Strictly speaking the "blue" value from the source image is
 * used as the alpha color. For a fully grayscale image, this
 * is correct, but for a color image it's not 100% accurate.
 * For a more accurate conversion, first use filter(GRAY)
 * which will make the image into a "correct" grayscale by
 * performing a proper luminance-based conversion.
 *
 * @webref image:pixels
 * @webBrief Masks part of an image with another image as an alpha channel
 * @usage web_application
 * @param img image to use as the mask
 */
public void mask(PImage img) {
  img.loadPixels();
  // Compare pixel (not display) dimensions; these differ from width/height
  // by the pixelDensity factor.
  if (this.pixelWidth != img.pixelWidth || this.pixelHeight != img.pixelHeight) {
    if (this.pixelDensity != img.pixelDensity) {
      // Same nominal size but mismatched densities.
      throw new IllegalArgumentException("mask() requires the mask image to have the same pixel size after adjusting for pixelDensity.");
    }
    else if (this.width != img.width || this.height != img.height) {
      throw new IllegalArgumentException("mask() requires the mask image to have the same width and height.");
    }
  }
  // Delegate to the int[]-based overload for the actual work.
  mask(img.pixels);
}
//////////////////////////////////////////////////////////////
// IMAGE FILTERS
/**
 * No-parameter version of filter(); see filter(int, float) for the
 * full documentation of each mode.
 */
public void filter(int kind) {
  loadPixels();

  switch (kind) {
    case BLUR:
      // TODO write basic low-pass filter blur here
      // what does photoshop do on the edges with this guy?
      // better yet.. why bother? just use gaussian with radius 1
      filter(BLUR, 1);
      break;

    case GRAY:
      if (format == ALPHA) {
        // for an alpha image, convert it to an opaque grayscale
        // (entries are 0..255 alpha values; higher alpha -> darker gray)
        for (int i = 0; i < pixels.length; i++) {
          int col = 255 - pixels[i];
          pixels[i] = 0xff000000 | (col << 16) | (col << 8) | col;
        }
        format = RGB;
      } else {
        // Converts RGB image data into grayscale using
        // weighted RGB components, and keeps alpha channel intact.
        // [toxi 040115]
        for (int i = 0; i < pixels.length; i++) {
          int col = pixels[i];
          // luminance = 0.3*red + 0.59*green + 0.11*blue
          // 0.30 * 256 =  77
          // 0.59 * 256 = 151
          // 0.11 * 256 =  28
          int lum = (77*(col>>16&0xff) + 151*(col>>8&0xff) + 28*(col&0xff))>>8;
          pixels[i] = (col & ALPHA_MASK) | lum<<16 | lum<<8 | lum;
        }
      }
      break;

    case INVERT:
      // flip only the color channels; alpha stays untouched
      for (int i = 0; i < pixels.length; i++) {
        //pixels[i] = 0xff000000 |
        pixels[i] ^= 0xffffff;
      }
      break;

    case POSTERIZE:
      throw new RuntimeException("Use filter(POSTERIZE, int levels) " +
                                 "instead of filter(POSTERIZE)");

    case OPAQUE:
      // force the alpha channel fully on
      for (int i = 0; i < pixels.length; i++) {
        pixels[i] |= 0xff000000;
      }
      format = RGB;
      break;

    case THRESHOLD:
      filter(THRESHOLD, 0.5f);
      break;

    // [toxi 050728] added new filters
    case ERODE:
      erode(); // former dilate(true);
      break;

    case DILATE:
      dilate(); // former dilate(false);
      break;
  }
  updatePixels(); // mark as modified
}
/**
 * Filters the image as defined by one of the following modes:
 *
 * THRESHOLD
 * Converts the image to black and white pixels depending on if they
 * are above or below the threshold defined by the level parameter.
 * The parameter must be between 0.0 (black) and 1.0 (white).
 * If no level is specified, 0.5 is used.
 *
 * GRAY
 * Converts any colors in the image to grayscale equivalents. No parameter is
 * used.
 *
 * OPAQUE
 * Sets the alpha channel to entirely opaque. No parameter is used.
 *
 * INVERT
 * Sets each pixel to its inverse value. No parameter is used.
 *
 * POSTERIZE
 * Limits each channel of the image to the number of colors specified as the
 * parameter. The parameter can be set to values between 2 and 255, but results
 * are most noticeable in the lower ranges.
 *
 * BLUR
 * Executes a Gaussian blur with the level parameter specifying the extent of
 * the blurring. If no parameter is used, the blur is equivalent to Gaussian
 * blur of radius 1. Larger values increase the blur.
 *
 * ERODE
 * Reduces the light areas. No parameter is used.
 *
 * DILATE
 * Increases the light areas. No parameter is used.
 *
 *
 *
 * Gaussian blur code contributed by
 * Mario Klingemann
 *
 * @webref image:pixels
 * @webBrief Converts the image to grayscale or black and white
 * @usage web_application
 * @param kind Either THRESHOLD, GRAY, OPAQUE, INVERT, POSTERIZE, BLUR, ERODE,
 *             or DILATE
 * @param param unique for each, see above
 */
public void filter(int kind, float param) {
  loadPixels();

  switch (kind) {
    case BLUR:
      // dispatch on format so only the channels actually present get blurred
      if (format == ALPHA)
        blurAlpha(param);
      else if (format == ARGB)
        blurARGB(param);
      else
        blurRGB(param);
      break;

    case GRAY:
      throw new RuntimeException("Use filter(GRAY) instead of " +
                                 "filter(GRAY, param)");

    case INVERT:
      throw new RuntimeException("Use filter(INVERT) instead of " +
                                 "filter(INVERT, param)");

    case OPAQUE:
      throw new RuntimeException("Use filter(OPAQUE) instead of " +
                                 "filter(OPAQUE, param)");

    case POSTERIZE:
      int levels = (int)param;
      if ((levels < 2) || (levels > 255)) {
        throw new RuntimeException("Levels must be between 2 and 255 for " +
                                   "filter(POSTERIZE, levels)");
      }
      int levels1 = levels - 1;
      for (int i = 0; i < pixels.length; i++) {
        int rlevel = (pixels[i] >> 16) & 0xff;
        int glevel = (pixels[i] >> 8) & 0xff;
        int blevel = pixels[i] & 0xff;
        // quantize each channel to 'levels' steps, then rescale to 0..255
        rlevel = (((rlevel * levels) >> 8) * 255) / levels1;
        glevel = (((glevel * levels) >> 8) * 255) / levels1;
        blevel = (((blevel * levels) >> 8) * 255) / levels1;
        pixels[i] = ((0xff000000 & pixels[i]) |
                     (rlevel << 16) |
                     (glevel << 8) |
                     blevel);
      }
      break;

    case THRESHOLD: // greater than or equal to the threshold
      int thresh = (int) (param * 255);
      for (int i = 0; i < pixels.length; i++) {
        // brightness taken as the max of the three channels
        int max = Math.max((pixels[i] & RED_MASK) >> 16,
                           Math.max((pixels[i] & GREEN_MASK) >> 8,
                                    (pixels[i] & BLUE_MASK)));
        pixels[i] = (pixels[i] & ALPHA_MASK) |
                    ((max < thresh) ? 0x000000 : 0xffffff);
      }
      break;

    // [toxi20050728] added new filters
    case ERODE:
      throw new RuntimeException("Use filter(ERODE) instead of " +
                                 "filter(ERODE, param)");

    case DILATE:
      throw new RuntimeException("Use filter(DILATE) instead of " +
                                 "filter(DILATE, param)");
  }
  updatePixels(); // mark as modified
}
/**
 * Optimized code for building the blur kernel.
 * further optimized blur code (approx. 15% for radius=20)
 * bigger speed gains for larger radii (~30%)
 * added support for various image types (ALPHA, RGB, ARGB)
 * [toxi 050728]
 */
protected void buildBlurKernel(float r) {
  // Clamp the requested radius so the kernel can't exceed half the smaller
  // image dimension. NOTE(review): this uses display width/height, not
  // pixelWidth/pixelHeight -- confirm intent for high-density images.
  float maxRadius = Math.min(width, height) / 2.0f;
  float maxR = maxRadius / 3.5f;
  r = Math.min(r, maxR);

  int radius = (int) (r * 3.5f);
  if (radius < 1) radius = 1;
  if (radius > 248) radius = 248;
  // Rebuild only when the radius actually changed (kernel is cached).
  if (blurRadius != radius) {
    blurRadius = radius;
    // NOTE(review): '+' binds tighter than '<<', so this evaluates as
    // (1 + blurRadius) << 1 == 2*radius + 2 -- one slot more than the
    // 2*radius + 1 taps written below. Harmless (the extra entry stays
    // zero), kept as-is to preserve long-standing behavior.
    blurKernelSize = 1 + blurRadius<<1;
    blurKernel = new int[blurKernelSize];
    blurMult = new int[blurKernelSize][256];

    int bk,bki;
    int[] bm,bmi;

    // Symmetric quadratic falloff: weight (radius - i)^2 at distance i
    // from the center, plus a 256-entry multiplication table per tap so
    // the blur loops avoid per-pixel multiplies.
    for (int i = 1, radiusi = radius - 1; i < radius; i++) {
      blurKernel[radius+i] = blurKernel[radiusi] = bki = radiusi * radiusi;
      bm=blurMult[radius+i];
      bmi=blurMult[radiusi--];
      for (int j = 0; j < 256; j++)
        bm[j] = bmi[j] = bki*j;
    }
    // center tap
    bk = blurKernel[radius] = radius * radius;
    bm = blurMult[radius];
    for (int j = 0; j < 256; j++)
      bm[j] = bk*j;
  }
}
/**
 * Divides numerator by denominator, falling back to the raw numerator when
 * the denominator is zero (avoids ArithmeticException in the blur loops
 * when the accumulated kernel weight sum is zero).
 */
private int safeDivide(int numerator, int denominator) {
  if (denominator == 0) {
    return numerator;
  }
  return numerator / denominator;
}
/**
 * Two-pass (horizontal then vertical) blur for ALPHA-format images, where
 * each pixels[] entry holds a bare 0..255 value (read via BLUE_MASK).
 */
protected void blurAlpha(float r) {
  int sum, cb;
  int read, ri, ym, ymi, bk0;
  int[] b2 = new int[pixels.length];  // intermediate horizontally-blurred plane
  int yi = 0;

  buildBlurKernel(r);

  // Pass 1: horizontal blur of each row into b2.
  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      //cb = cg = cr = sum = 0;
      cb = sum = 0;
      read = x - blurRadius;
      if (read<0) {
        bk0=-read;  // skip kernel taps that fall off the left edge
        read=0;
      } else {
        if (read >= pixelWidth)
          break;
        bk0=0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= pixelWidth)
          break;
        int c = pixels[read + yi];
        int[] bm = blurMult[i];
        cb += bm[c & BLUE_MASK];
        sum += blurKernel[i];
        read++;
      }
      ri = yi + x;
      // normalize by the kernel weight actually accumulated (edges use less)
      b2[ri] = safeDivide(cb, sum);
    }
    yi += pixelWidth;
  }

  // Pass 2: vertical blur of b2 back into pixels[].
  yi = 0;
  ym = -blurRadius;
  ymi = ym * pixelWidth;
  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      cb = sum = 0;
      if (ym < 0) {
        bk0 = ri = -ym;  // skip taps above the top edge
        read = x;
      } else {
        if (ym >= pixelHeight)
          break;
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= pixelHeight)
          break;
        int[] bm = blurMult[i];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += pixelWidth;
      }
      pixels[x+yi] = safeDivide(cb, sum);
    }
    yi += pixelWidth;
    ymi += pixelWidth;
    ym++;
  }
}
/**
 * Two-pass blur for RGB images: pass 1 blurs each channel horizontally
 * into r2/g2/b2, pass 2 blurs those planes vertically back into pixels[],
 * forcing alpha fully opaque in the result.
 */
protected void blurRGB(float r) {
  int sum, cr, cg, cb;
  int read, ri, ym, ymi, bk0;
  // intermediate per-channel planes produced by the horizontal pass
  int[] r2 = new int[pixels.length];
  int[] g2 = new int[pixels.length];
  int[] b2 = new int[pixels.length];
  int yi = 0;

  buildBlurKernel(r);

  // Pass 1: horizontal.
  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      cb = cg = cr = sum = 0;
      read = x - blurRadius;
      if (read < 0) {
        bk0 = -read;  // skip taps falling off the left edge
        read = 0;
      } else {
        if (read >= pixelWidth) {
          break;
        }
        bk0 = 0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= pixelWidth) {
          break;
        }
        int c = pixels[read + yi];
        int[] bm = blurMult[i];
        cr += bm[(c & RED_MASK) >> 16];
        cg += bm[(c & GREEN_MASK) >> 8];
        cb += bm[c & BLUE_MASK];
        sum += blurKernel[i];
        read++;
      }
      ri = yi + x;
      // normalize by the kernel weight actually accumulated
      r2[ri] = safeDivide(cr, sum);
      g2[ri] = safeDivide(cg, sum);
      b2[ri] = safeDivide(cb, sum);
    }
    yi += pixelWidth;
  }

  // Pass 2: vertical, reading the intermediate planes.
  yi = 0;
  ym = -blurRadius;
  ymi = ym * pixelWidth;

  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      cb = cg = cr = sum = 0;
      if (ym < 0) {
        bk0 = ri = -ym;  // skip taps above the top edge
        read = x;
      } else {
        if (ym >= pixelHeight) {
          break;
        }
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= pixelHeight) {
          break;
        }
        int[] bm = blurMult[i];
        cr += bm[r2[read]];
        cg += bm[g2[read]];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += pixelWidth;
      }
      pixels[x+yi] = 0xff000000 | (safeDivide(cr, sum))<<16 | (safeDivide(cg, sum))<<8 | (safeDivide(cb, sum));
    }
    yi += pixelWidth;
    ymi += pixelWidth;
    ym++;
  }
}
/**
 * Two-pass blur for ARGB images: like blurRGB() but also blurs the
 * alpha channel through its own intermediate plane.
 */
protected void blurARGB(float r) {
  int sum, cr, cg, cb, ca;
  int /*pixel,*/ read, ri, /*roff,*/ ym, ymi, /*riw,*/ bk0;
  int wh = pixels.length;
  // intermediate per-channel planes produced by the horizontal pass
  int[] r2 = new int[wh];
  int[] g2 = new int[wh];
  int[] b2 = new int[wh];
  int[] a2 = new int[wh];
  int yi = 0;

  buildBlurKernel(r);

  // Pass 1: horizontal.
  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      cb = cg = cr = ca = sum = 0;
      read = x - blurRadius;
      if (read < 0) {
        bk0 = -read;  // skip taps falling off the left edge
        read = 0;
      } else {
        if (read >= pixelWidth) {
          break;
        }
        bk0=0;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (read >= pixelWidth) {
          break;
        }
        int c = pixels[read + yi];
        int[] bm=blurMult[i];
        ca += bm[(c & ALPHA_MASK) >>> 24];
        cr += bm[(c & RED_MASK) >> 16];
        cg += bm[(c & GREEN_MASK) >> 8];
        cb += bm[c & BLUE_MASK];
        sum += blurKernel[i];
        read++;
      }
      ri = yi + x;
      // normalize by the kernel weight actually accumulated
      a2[ri] = safeDivide(ca, sum);
      r2[ri] = safeDivide(cr, sum);
      g2[ri] = safeDivide(cg, sum);
      b2[ri] = safeDivide(cb, sum);
    }
    yi += pixelWidth;
  }

  // Pass 2: vertical, reading the intermediate planes.
  yi = 0;
  ym = -blurRadius;
  ymi = ym * pixelWidth;

  for (int y = 0; y < pixelHeight; y++) {
    for (int x = 0; x < pixelWidth; x++) {
      cb = cg = cr = ca = sum = 0;
      if (ym < 0) {
        bk0 = ri = -ym;  // skip taps above the top edge
        read = x;
      } else {
        if (ym >= pixelHeight) {
          break;
        }
        bk0 = 0;
        ri = ym;
        read = x + ymi;
      }
      for (int i = bk0; i < blurKernelSize; i++) {
        if (ri >= pixelHeight) {
          break;
        }
        int[] bm=blurMult[i];
        ca += bm[a2[read]];
        cr += bm[r2[read]];
        cg += bm[g2[read]];
        cb += bm[b2[read]];
        sum += blurKernel[i];
        ri++;
        read += pixelWidth;
      }
      pixels[x+yi] = (safeDivide(ca, sum))<<24 | (safeDivide(cr, sum))<<16 | (safeDivide(cg, sum))<<8 | (safeDivide(cb, sum));
    }
    yi += pixelWidth;
    ymi += pixelWidth;
    ym++;
  }
}
/**
 * Generic dilate/erode filter using luminance values
 * as decision factor. [toxi 050728]
 */
protected void dilate() { // formerly dilate(false)
  int index = 0;
  int maxIndex = pixels.length;
  int[] outgoing = new int[maxIndex];

  // dilation (grow light areas): each output pixel becomes the
  // brightest of itself and its 4-connected neighbors
  while (index < maxIndex) {
    int curRowIndex = index;
    int maxRowIndex = index + pixelWidth;
    while (index < maxRowIndex) {
      int orig = pixels[index];
      int result = orig;
      // neighbor indices, clamped so edge pixels fall back to themselves
      int idxLeft = index - 1;
      int idxRight = index + 1;
      int idxUp = index - pixelWidth;
      int idxDown = index + pixelWidth;
      if (idxLeft < curRowIndex) {
        idxLeft = index;
      }
      if (idxRight >= maxRowIndex) {
        idxRight = index;
      }
      if (idxUp < 0) {
        idxUp = index;
      }
      if (idxDown >= maxIndex) {
        idxDown = index;
      }

      int colUp = pixels[idxUp];
      int colLeft = pixels[idxLeft];
      int colDown = pixels[idxDown];
      int colRight = pixels[idxRight];

      // compute luminance (8.8 fixed point; weights 77+151+28 = 256)
      int currLum =
        77*(orig>>16&0xff) + 151*(orig>>8&0xff) + 28*(orig&0xff);
      int lumLeft =
        77*(colLeft>>16&0xff) + 151*(colLeft>>8&0xff) + 28*(colLeft&0xff);
      int lumRight =
        77*(colRight>>16&0xff) + 151*(colRight>>8&0xff) + 28*(colRight&0xff);
      int lumUp =
        77*(colUp>>16&0xff) + 151*(colUp>>8&0xff) + 28*(colUp&0xff);
      int lumDown =
        77*(colDown>>16&0xff) + 151*(colDown>>8&0xff) + 28*(colDown&0xff);

      // keep the brightest of the five candidates
      if (lumLeft > currLum) {
        result = colLeft;
        currLum = lumLeft;
      }
      if (lumRight > currLum) {
        result = colRight;
        currLum = lumRight;
      }
      if (lumUp > currLum) {
        result = colUp;
        currLum = lumUp;
      }
      if (lumDown > currLum) {
        result = colDown;
        // currLum = lumDown; // removed, unused assignment
      }
      outgoing[index++] = result;
    }
  }
  System.arraycopy(outgoing, 0, pixels, 0, maxIndex);
}
/**
 * Counterpart of dilate(): each output pixel becomes the darkest
 * (lowest-luminance) of itself and its 4-connected neighbors.
 */
protected void erode() { // formerly dilate(true)
  int index = 0;
  int maxIndex = pixels.length;
  int[] outgoing = new int[maxIndex];

  // erosion (grow dark areas): take the minimum-luminance neighbor
  while (index < maxIndex) {
    int curRowIndex = index;
    int maxRowIndex = index + pixelWidth;
    while (index < maxRowIndex) {
      int orig = pixels[index];
      int result = orig;
      // neighbor indices, clamped so edge pixels fall back to themselves
      int idxLeft = index - 1;
      int idxRight = index + 1;
      int idxUp = index - pixelWidth;
      int idxDown = index + pixelWidth;
      if (idxLeft < curRowIndex) {
        idxLeft = index;
      }
      if (idxRight >= maxRowIndex) {
        idxRight = index;
      }
      if (idxUp < 0) {
        idxUp = index;
      }
      if (idxDown >= maxIndex) {
        idxDown = index;
      }

      int colUp = pixels[idxUp];
      int colLeft = pixels[idxLeft];
      int colDown = pixels[idxDown];
      int colRight = pixels[idxRight];

      // compute luminance (8.8 fixed point; weights 77+151+28 = 256)
      int currLum =
        77*(orig>>16&0xff) + 151*(orig>>8&0xff) + 28*(orig&0xff);
      int lumLeft =
        77*(colLeft>>16&0xff) + 151*(colLeft>>8&0xff) + 28*(colLeft&0xff);
      int lumRight =
        77*(colRight>>16&0xff) + 151*(colRight>>8&0xff) + 28*(colRight&0xff);
      int lumUp =
        77*(colUp>>16&0xff) + 151*(colUp>>8&0xff) + 28*(colUp&0xff);
      int lumDown =
        77*(colDown>>16&0xff) + 151*(colDown>>8&0xff) + 28*(colDown&0xff);

      // keep the darkest of the five candidates
      if (lumLeft < currLum) {
        result = colLeft;
        currLum = lumLeft;
      }
      if (lumRight < currLum) {
        result = colRight;
        currLum = lumRight;
      }
      if (lumUp < currLum) {
        result = colUp;
        currLum = lumUp;
      }
      if (lumDown < currLum) {
        result = colDown;
        // currLum = lumDown; // removed, unused assignment
      }
      outgoing[index++] = result;
    }
  }
  System.arraycopy(outgoing, 0, pixels, 0, maxIndex);
}
//////////////////////////////////////////////////////////////
// COPY
/**
* Copies a region of pixels from one image into another. If the source and
* destination regions aren't the same size, it will automatically resize
* source pixels to fit the specified target region. No alpha information
* is used in the process, however if the source image has an alpha channel
* set, it will be copied as well.
*
* As of release 0149, this function ignores imageMode().
*
* @webref image:pixels
* @webBrief Copies the entire image
* @usage web_application
* @param sx X coordinate of the source's upper left corner
* @param sy Y coordinate of the source's upper left corner
* @param sw source image width
* @param sh source image height
* @param dx X coordinate of the destination's upper left corner
* @param dy Y coordinate of the destination's upper left corner
* @param dw destination image width
* @param dh destination image height
* @see PGraphics#alpha(int)
* @see PImage#blend(PImage, int, int, int, int, int, int, int, int, int)
*/
public void copy(int sx, int sy, int sw, int sh,
                 int dx, int dy, int dw, int dh) {
  // Copy within this image: blend() in REPLACE mode performs the
  // clipping and (when sw/sh differ from dw/dh) the resizing.
  blend(this, sx, sy, sw, sh, dx, dy, dw, dh, REPLACE);
}
/**
 * @param src an image variable referring to the source image.
 */
public void copy(PImage src,
                 int sx, int sy, int sw, int sh,
                 int dx, int dy, int dw, int dh) {
  // Same as the 8-arg copy(), but reads from an arbitrary source image.
  blend(src, sx, sy, sw, sh, dx, dy, dw, dh, REPLACE);
}
//////////////////////////////////////////////////////////////
// BLEND
/**
 *
 * Blends two color values together based on the blending mode given as the
 * MODE parameter. The possible modes are described in the reference
 * for the blend() function.
 *
 * A useful reference for blending modes and their algorithms can be
 * found in the SVG specification.
 *
 * It is important to note that Processing uses "fast" code, not
 * necessarily "correct" code. No biggie, most software does. A nitpicker
 * can find numerous "off by 1 division" problems in the blend code where
 * >>8 or >>7 is used when strictly speaking
 * /255.0 or /127.0 should have been used.
 *
 * For instance, exclusion (not intended for real-time use) reads
 * r1 + r2 - ((2 * r1 * r2) / 255) because 255 == 1.0
 * not 256 == 1.0. In other words, (255*255)>>8 is not
 * the same as (255*255)/255. But for real-time use the shifts
 * are preferable, and the difference is insignificant for applications
 * built with Processing.
 *
 * @webref color:creating & reading
 * @webBrief Blends two color values together based on the blending mode given as the
 *           MODE parameter
 * @usage web_application
 * @param c1 the first color to blend
 * @param c2 the second color to blend
 * @param mode either BLEND, ADD, SUBTRACT, DARKEST, LIGHTEST, DIFFERENCE, EXCLUSION, MULTIPLY, SCREEN, OVERLAY, HARD_LIGHT, SOFT_LIGHT, DODGE, or BURN
 * @see PImage#blend(PImage, int, int, int, int, int, int, int, int, int)
 * @see PApplet#color(float, float, float, float)
 */
static public int blendColor(int c1, int c2, int mode) {  // ignore
  return switch (mode) {
    case REPLACE -> c2;
    case BLEND -> blend_blend(c1, c2);
    case ADD -> blend_add_pin(c1, c2);
    case SUBTRACT -> blend_sub_pin(c1, c2);
    case LIGHTEST -> blend_lightest(c1, c2);
    case DARKEST -> blend_darkest(c1, c2);
    case DIFFERENCE -> blend_difference(c1, c2);
    case EXCLUSION -> blend_exclusion(c1, c2);
    case MULTIPLY -> blend_multiply(c1, c2);
    case SCREEN -> blend_screen(c1, c2);
    case HARD_LIGHT -> blend_hard_light(c1, c2);
    case SOFT_LIGHT -> blend_soft_light(c1, c2);
    case OVERLAY -> blend_overlay(c1, c2);
    case DODGE -> blend_dodge(c1, c2);
    case BURN -> blend_burn(c1, c2);
    default -> 0;  // unknown mode
  };
}


public void blend(int sx, int sy, int sw, int sh,
                  int dx, int dy, int dw, int dh, int mode) {
  blend(this, sx, sy, sw, sh, dx, dy, dw, dh, mode);
}


/**
 * Blends a region of pixels into the image specified by the img
 * parameter. These copies utilize full alpha channel support and a choice
 * of the following modes to blend the colors of source pixels (A) with the
 * ones of pixels in the destination image (B).
 *
 * Hard Light:
 * O = OVERLAY(S, D)
 *
 * O = 2 * MULTIPLY(D, S) = 2DS                  for S < 0.5
 * O = 2 * SCREEN(D, S) - 1 = 2(S + D - DS) - 1  otherwise
 */
private static int blend_hard_light(int dst, int src) {
  int a = src >>> 24;
  // s_a is the source alpha scaled to 0..0x100 (the +1 makes 0x7F.. round up)
  int s_a = a + (a >= 0x7F ? 1 : 0);
  int d_a = 0x100 - s_a;
  int d_r = dst & RED_MASK;
  int d_g = dst & GREEN_MASK;
  int d_b = dst & BLUE_MASK;
  int s_r = src & RED_MASK;
  int s_g = src & GREEN_MASK;
  int s_b = src & BLUE_MASK;
  int r = (s_r < 0x800000) ?
    s_r * ((d_r >>> 16) + 1) >>> 7 :
    0xFF0000 - ((0x100 - (d_r >>> 16)) * (RED_MASK - s_r) >>> 7);
  int g = (s_g < 0x8000) ?
    s_g * (d_g + 0x100) >>> 15 :
    (0xFF00 - ((0x10000 - d_g) * (GREEN_MASK - s_g) >>> 15));
  int b = (s_b < 0x80) ?
    s_b * (d_b + 1) >>> 7 :
    (0xFF00 - ((0x100 - d_b) * (BLUE_MASK - s_b) << 1)) >>> 8;
  return min((dst >>> 24) + a, 0xFF) << 24 |
    ((dst & RB_MASK) * d_a + ((r | b) & RB_MASK) * s_a) >>> 8 & RB_MASK |
    ((dst & GN_MASK) * d_a + (g & GN_MASK) * s_a) >>> 8 & GN_MASK;
}


/**
 * Soft Light (peg top)
 * O = (1 - D) * MULTIPLY(D, S) + D * SCREEN(D, S)
 * O = (1 - D) * DS + D * (1 - (1 - D)(1 - S))
 * O = 2DS + DD - 2DDS
 */
private static int blend_soft_light(int dst, int src) {
  int a = src >>> 24;
  int s_a = a + (a >= 0x7F ? 1 : 0);
  int d_a = 0x100 - s_a;
  int d_r = dst & RED_MASK;
  int d_g = dst & GREEN_MASK;
  int d_b = dst & BLUE_MASK;
  // NOTE(review): in Java, '>>' binds tighter than '&', so these parse as
  // src & (MASK >> shift) -- i.e. they read the LOW byte of src rather
  // than the shifted channel. Confirm against upstream before changing;
  // the blend code intentionally uses fast/loose arithmetic.
  int s_r1 = src & RED_MASK >> 16;
  int s_g1 = src & GREEN_MASK >> 8;
  int s_b1 = src & BLUE_MASK;
  // NOTE(review): '7F' is the float literal 7.0f, not 0x7F (127) --
  // verify whether 0x7F was intended.
  int d_r1 = (d_r >> 16) + (s_r1 < 7F ? 1 : 0);
  int d_g1 = (d_g >> 8) + (s_g1 < 7F ? 1 : 0);
  int d_b1 = d_b + (s_b1 < 7F ? 1 : 0);
  int r = (s_r1 * d_r >> 7) + 0xFF * d_r1 * (d_r1 + 1) -
    ((s_r1 * d_r1 * d_r1) << 1) & RED_MASK;
  int g = (s_g1 * d_g << 1) + 0xFF * d_g1 * (d_g1 + 1) -
    ((s_g1 * d_g1 * d_g1) << 1) >>> 8 & GREEN_MASK;
  int b = (s_b1 * d_b << 9) + 0xFF * d_b1 * (d_b1 + 1) -
    ((s_b1 * d_b1 * d_b1) << 1) >>> 16;
  return min((dst >>> 24) + a, 0xFF) << 24 |
    ((dst & RB_MASK) * d_a + (r | b) * s_a) >>> 8 & RB_MASK |
    ((dst & GN_MASK) * d_a + g * s_a) >>> 8 & GN_MASK;
}


/**
 * Dodge
 * O = D / (1 - S)
 */
private static int blend_dodge(int dst, int src) {
  int a = src >>> 24;
  int s_a = a + (a >= 0x7F ? 1 : 0);
  int d_a = 0x100 - s_a;
  int r = (dst & RED_MASK) / (256 - ((src & RED_MASK) >> 16));
  int g = ((dst & GREEN_MASK) << 8) / (256 - ((src & GREEN_MASK) >> 8));
  int b = ((dst & BLUE_MASK) << 8) / (256 - (src & BLUE_MASK));
  // clamp each channel at full intensity before recombining
  int rb = (r > 0xFF00 ? 0xFF0000 : ((r << 8) & RED_MASK)) |
    (b > 0x00FF ? 0x0000FF : b);
  int gn = (g > 0xFF00 ? 0x00FF00 : (g & GREEN_MASK));
  return min((dst >>> 24) + a, 0xFF) << 24 |
    ((dst & RB_MASK) * d_a + rb * s_a) >>> 8 & RB_MASK |
    ((dst & GN_MASK) * d_a + gn * s_a) >>> 8 & GN_MASK;
}


/**
 * Burn
 * O = 1 - (1 - A) / B
 */
private static int blend_burn(int dst, int src) {
  int a = src >>> 24;
  int s_a = a + (a >= 0x7F ? 1 : 0);
  int d_a = 0x100 - s_a;
  // NOTE(review): as in soft light above, 'src & RED_MASK >> 16' parses as
  // src & (RED_MASK >> 16); confirm intent against upstream.
  int r = ((0xFF0000 - (dst & RED_MASK))) / (1 + (src & RED_MASK >> 16));
  int g = ((0x00FF00 - (dst & GREEN_MASK)) << 8) / (1 + (src & GREEN_MASK >> 8));
  int b = ((0x0000FF - (dst & BLUE_MASK)) << 8) / (1 + (src & BLUE_MASK));
  // clamp, then invert back (burn darkens)
  int rb = RB_MASK -
    (r > 0xFF00 ? 0xFF0000 : ((r << 8) & RED_MASK)) -
    (b > 0x00FF ? 0x0000FF : b);
  int gn = GN_MASK -
    (g > 0xFF00 ? 0x00FF00 : (g & GREEN_MASK));
  return min((dst >>> 24) + a, 0xFF) << 24 |
    ((dst & RB_MASK) * d_a + rb * s_a) >>> 8 & RB_MASK |
    ((dst & GN_MASK) * d_a + gn * s_a) >>> 8 & GN_MASK;
}


//////////////////////////////////////////////////////////////

// FILE I/O


/**
 * Targa image loader for RLE-compressed TGA files.
 *
 * Rewritten for 0115 to read/write RLE-encoded targa images.
 * For 0125, non-RLE encoded images are now supported, along with
 * images whose y-order is reversed (which is standard for TGA files).
 *
 * A version of this function is in MovieMaker.java. Any fixes here
 * should be applied over in MovieMaker as well.
 *
 * Known issue with RLE encoding and odd behavior in some apps:
 * https://github.com/processing/processing/issues/2096
 * Please help!
 */
static public PImage loadTGA(InputStream input) throws IOException {  // ignore
  // Read the fixed 18-byte TGA header; loop because InputStream.read()
  // may return fewer bytes than requested.
  byte[] header = new byte[18];
  int offset = 0;
  do {
    int count = input.read(header, offset, header.length - offset);
    if (count == -1) return null;
    offset += count;
  } while (offset < 18);

  /*
    header[2] image type code
    2  (0x02) - Uncompressed, RGB images.
    3  (0x03) - Uncompressed, black and white images.
    10 (0x0A) - Run-length encoded RGB images.
    11 (0x0B) - Compressed, black and white images. (grayscale?)

    header[16] is the bit depth (8, 24, 32)

    header[17] image descriptor (packed bits)
    0x20 is 32 = origin upper-left
    0x28 is 32 + 8 = origin upper-left + 32 bits

      7  6  5  4  3  2  1  0
    128 64 32 16  8  4  2  1
  */

  int format = 0;

  if (((header[2] == 3) || (header[2] == 11)) &&  // B&W, plus RLE or not
      (header[16] == 8) &&  // 8 bits
      ((header[17] == 0x8) || (header[17] == 0x28))) {  // origin, 32 bit
    format = ALPHA;

  } else if (((header[2] == 2) || (header[2] == 10)) &&  // RGB, RLE or not
             (header[16] == 24) &&  // 24 bits
             ((header[17] == 0x20) || (header[17] == 0))) {  // origin
    format = RGB;

  } else if (((header[2] == 2) || (header[2] == 10)) &&
             (header[16] == 32) &&
             ((header[17] == 0x8) || (header[17] == 0x28))) {  // origin, 32
    format = ARGB;
  }

  if (format == 0) {
    System.err.println("Unknown .tga file format");
    return null;
  }

  // dimensions are little-endian 16-bit values
  int w = ((header[13] & 0xff) << 8) + (header[12] & 0xff);
  int h = ((header[15] & 0xff) << 8) + (header[14] & 0xff);
  PImage outgoing = new PImage(w, h, format);

  // where "reversed" means upper-left corner (normal for most of
  // the modernized world, but "reversed" for the tga spec)
  //boolean reversed = (header[17] & 0x20) != 0;
  // https://github.com/processing/processing/issues/1682
  boolean reversed = (header[17] & 0x20) == 0;

  if ((header[2] == 2) || (header[2] == 3)) {  // not RLE encoded
    if (reversed) {
      // bottom-to-top file order: fill rows starting from the last one
      int index = (h-1) * w;
      switch (format) {
        case ALPHA:
          for (int y = h-1; y >= 0; y--) {
            for (int x = 0; x < w; x++) {
              outgoing.pixels[index + x] = input.read();
            }
            index -= w;
          }
          break;
        case RGB:
          // bytes arrive as B, G, R
          for (int y = h-1; y >= 0; y--) {
            for (int x = 0; x < w; x++) {
              outgoing.pixels[index + x] =
                input.read() | (input.read() << 8) | (input.read() << 16) |
                0xff000000;
            }
            index -= w;
          }
          break;
        case ARGB:
          // bytes arrive as B, G, R, A
          for (int y = h-1; y >= 0; y--) {
            for (int x = 0; x < w; x++) {
              outgoing.pixels[index + x] =
                input.read() | (input.read() << 8) | (input.read() << 16) |
                (input.read() << 24);
            }
            index -= w;
          }
      }
    } else {  // not reversed
      int count = w * h;
      switch (format) {
        case ALPHA:
          for (int i = 0; i < count; i++) {
            outgoing.pixels[i] = input.read();
          }
          break;
        case RGB:
          for (int i = 0; i < count; i++) {
            outgoing.pixels[i] =
              input.read() | (input.read() << 8) | (input.read() << 16) |
              0xff000000;
          }
          break;
        case ARGB:
          for (int i = 0; i < count; i++) {
            outgoing.pixels[i] =
              input.read() | (input.read() << 8) | (input.read() << 16) |
              (input.read() << 24);
          }
          break;
      }
    }

  } else {  // header[2] is 10 or 11 -- RLE packets
    int index = 0;
    int[] px = outgoing.pixels;

    while (index < px.length) {
      int num = input.read();
      // high bit set means a run-length packet; low 7 bits are count-1
      boolean isRLE = (num & 0x80) != 0;
      if (isRLE) {
        num -= 127;  // (num & 0x7F) + 1
        int pixel = switch (format) {
          case ALPHA -> input.read();
          case RGB -> 0xFF000000 |
            input.read() | (input.read() << 8) | (input.read() << 16);
          case ARGB -> input.read() |
            (input.read() << 8) | (input.read() << 16) | (input.read() << 24);
          default -> 0;
        };
        for (int i = 0; i < num; i++) {
          px[index++] = pixel;
          if (index == px.length) break;
        }
      } else {  // write up to 127 bytes as uncompressed
        num += 1;
        switch (format) {
          case ALPHA:
            for (int i = 0; i < num; i++) {
              px[index++] = input.read();
            }
            break;
          case RGB:
            for (int i = 0; i < num; i++) {
              px[index++] = 0xFF000000 |
                input.read() | (input.read() << 8) | (input.read() << 16);
              //(is.read() << 16) | (is.read() << 8) | is.read();
            }
            break;
          case ARGB:
            for (int i = 0; i < num; i++) {
              px[index++] = input.read() | //(is.read() << 24) |
                (input.read() << 8) |
                (input.read() << 16) | (input.read() << 24);
              //(is.read() << 16) | (is.read() << 8) | is.read();
            }
            break;
        }
      }
    }

    if (!reversed) {
      // flip rows in place so the result is upper-left origin
      int[] temp = new int[w];
      for (int y = 0; y < h/2; y++) {
        int z = (h-1) - y;
        System.arraycopy(px, y*w, temp, 0, w);
        System.arraycopy(px, z*w, px, y*w, w);
        System.arraycopy(temp, 0, px, z*w, w);
      }
    }
  }
  input.close();
  return outgoing;
}


/**
 * Creates a Targa32 formatted byte sequence of specified
 * pixel buffer using RLE compression.
 *
 * Also figured out how to avoid parsing the image upside-down
 * (there's a header flag to set the image origin to top-left)
 *
 * Starting with revision 0092, the format setting is taken into account:
 *
 * As of revision 0100, this function requires an absolute path,
 * in order to avoid confusion. To save inside the sketch folder,
 * use the function savePath() from PApplet, or use saveFrame() instead.
 * As of revision 0116, savePath() is not needed if this object has been
 * created (as recommended) via createImage() or createGraphics() or
 * one of its neighbors.
 *
 * As of revision 0115, when using Java 1.4 and later, you can write
 * to several formats besides tga and tiff. If Java 1.4 is installed
 * and the extension used is supported (usually png, jpg, jpeg, bmp,
 * and tiff), then those methods will be used to write the image.
 * To get a list of the supported formats for writing, use:
 * println(javax.imageio.ImageIO.getReaderFormatNames())
 *
 * In Processing 4.0 beta 5, the old (and sometimes buggy) TIFF
 * reader/writer was removed, so ImageIO is used for TIFF files.
 *
 * Also, files must have an extension: we're no longer adding .tif to
 * files with no extension, because that can lead to confusing results,
 * and the behavior is inconsistent with the rest of the API.
 *
 * @webref pimage:method
 * @webBrief Saves the image to a TIFF, TARGA, PNG, or JPEG file
 * @usage application
 * @param filename a sequence of letters and numbers
 */
public boolean save(String filename) {  // ignore
  String path;
  if (parent != null) {
    // use savePath(), so that the intermediate directories are created
    path = parent.savePath(filename);
  } else {
    File file = new File(filename);
    if (file.isAbsolute()) {
      // make sure that the intermediate folders have been created
      PApplet.createPath(file);
      path = file.getAbsolutePath();
    } else {
      String msg =
        "PImage.save() requires an absolute path. " +
        "Use createImage(), or pass savePath() to save().";
      PGraphics.showException(msg);
      return false;
    }
  }
  return saveImpl(path);
}


/**
 * Override this in subclasses to intercept save calls for other formats
 * or higher-performance implementations. When reaching this code, pixels
 * must be loaded and that path should be absolute.
 *
 * @param path must be a full path (not relative or simply a filename)
 */
protected boolean saveImpl(String path) {
  // Make sure the pixel data is ready to go
  loadPixels();

  boolean success;
  try {
    final String lower = path.toLowerCase();
    if (lower.endsWith(".tga")) {
      OutputStream os = new BufferedOutputStream(new FileOutputStream(path), 32768);
      success = saveTGA(os); //, pixels, width, height, format);
      os.close();

    } else {
      // TODO Imperfect, possibly temporary solution for 4.x releases
      // https://github.com/processing/processing4/wiki/Exorcising-AWT
      success = ShimAWT.saveImage(this, path);
    }
  } catch (IOException e) {
    System.err.println("Error while saving " + path);
    e.printStackTrace();
    success = false;
  }
  return success;
}
}