我需要缩放来自iPhone应用程序中的视图层的图像的分辨率。显而易见的方法是在UIGraphicsBeginImageContextWithOptions中指定一个比例因子,但是只要比例因子不是1.0,图像的质量就会严重下降——远远超过单纯像素减少所预期的程度。
我尝试了其他几种缩放技术,但它们似乎都围绕着CGContext的东西,而且似乎都做了同样的事情。
简单地改变图像的“大小”(不改变点的分辨率)是不够的,主要是因为这些信息似乎很快就会被管道中的其他人丢弃(图像将被转换为JPG格式并通过电子邮件发送)。
有没有其他方法可以在iPhone上缩放图像?
发布于 2011-05-19 08:27:15
关于 UIImage 调整大小问题,这篇帖子提供了许多处理UIImage对象的方法。UIImage存在一些需要修复的方向问题,这篇帖子和另一篇帖子给出了相应的解决办法。
// Returns a copy of the receiver resized to dstSize, correcting for the
// UIImage's imageOrientation (EXIF 1-8) so the result is always "Up".
//
// NOTE(review): scaleRatio is derived from width only; for the four
// rotated orientations dstSize is swapped below, so callers are assumed
// to pass a dstSize with the same aspect ratio as the source — confirm.
// The drawing context is created with self.scale, so the pixel size of
// the result is dstSize * self.scale.
-(UIImage*)resizedImageToSize:(CGSize)dstSize
{
CGImageRef imgRef = self.CGImage;
// the below values are regardless of orientation : for UIImages from Camera, width>height (landscape)
CGSize srcSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef)); // not equivalent to self.size (which is dependant on the imageOrientation)!
/* Don't resize if we already meet the required destination size. */
if (CGSizeEqualToSize(srcSize, dstSize)) {
return self;
}
// Uniform scale factor applied via the CTM when drawing (width-based).
CGFloat scaleRatio = dstSize.width / srcSize.width;
// Handle orientation problem of UIImage: build an affine transform that
// maps the raw CGImage pixels into an upright coordinate system.
UIImageOrientation orient = self.imageOrientation;
CGAffineTransform transform = CGAffineTransformIdentity;
switch(orient) {
case UIImageOrientationUp: //EXIF = 1
transform = CGAffineTransformIdentity;
break;
case UIImageOrientationUpMirrored: //EXIF = 2
// Horizontal flip: translate right edge to origin, then mirror X.
transform = CGAffineTransformMakeTranslation(srcSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
break;
case UIImageOrientationDown: //EXIF = 3
// 180-degree rotation about the image center.
transform = CGAffineTransformMakeTranslation(srcSize.width, srcSize.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationDownMirrored: //EXIF = 4
// Vertical flip.
transform = CGAffineTransformMakeTranslation(0.0, srcSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
break;
case UIImageOrientationLeftMirrored: //EXIF = 5
// Rotated orientations swap the destination's width and height.
dstSize = CGSizeMake(dstSize.height, dstSize.width);
transform = CGAffineTransformMakeTranslation(srcSize.height, srcSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI_2);
break;
case UIImageOrientationLeft: //EXIF = 6
dstSize = CGSizeMake(dstSize.height, dstSize.width);
transform = CGAffineTransformMakeTranslation(0.0, srcSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * M_PI_2);
break;
case UIImageOrientationRightMirrored: //EXIF = 7
dstSize = CGSizeMake(dstSize.height, dstSize.width);
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight: //EXIF = 8
dstSize = CGSizeMake(dstSize.height, dstSize.width);
transform = CGAffineTransformMakeTranslation(srcSize.height, 0.0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
default:
[NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
}
/////////////////////////////////////////////////////////////////////////////
// The actual resize: draw the image on a new context, applying a transform matrix
UIGraphicsBeginImageContextWithOptions(dstSize, NO, self.scale);
CGContextRef context = UIGraphicsGetCurrentContext();
if (!context) {
return nil;
}
// UIKit contexts are top-left-origin while CGContextDrawImage expects a
// bottom-left-origin (flipped) coordinate system; the scale/translate
// pair below flips the context and applies scaleRatio at the same time.
if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -srcSize.height, 0);
} else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -srcSize.height);
}
// Apply the orientation-correction transform built above.
CGContextConcatCTM(context, transform);
// we use srcSize (and not dstSize) as the size to specify is in user space (and we use the CTM to apply a scaleRatio)
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, srcSize.width, srcSize.height), imgRef);
UIImage* resizedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return resizedImage;
}发布于 2015-06-17 22:13:33
Swift扩展:
extension UIImage{
// returns a scaled version of the image
/// Returns a copy of this image redrawn into a bitmap of the given size.
/// Pass isOpaque = true for images without alpha to get faster drawing.
func imageScaledToSize(size : CGSize, isOpaque : Bool) -> UIImage{
    // Open a bitmap context of the requested size; the 0.0 scale argument
    // means "use the device's main-screen scale".
    UIGraphicsBeginImageContextWithOptions(size, isOpaque, 0.0)
    // Draw this image so it fills the entire context.
    let targetRect = CGRect(origin: CGPointZero, size: size)
    self.drawInRect(targetRect)
    // Capture the rendered bitmap as a UIImage before tearing down the context.
    let rendered = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return rendered
}
}

示例:
aUIImageView.image = aUIImage.imageScaledToSize(aUIImageView.bounds.size, isOpaque : false)

如果图像没有alpha,则将isOpaque设置为true:绘制将具有更好的性能。
发布于 2011-05-20 04:24:40
我想出了这个算法来创建一个一半大小的图像:
// Release callback for the target pixel buffer: CGDataProviderCreateWithData
// does NOT free raw bytes for you; this callback frees our malloc'd buffer
// when the provider is destroyed.
static void XYZReleaseHalvedPixels(void *info, const void *data, size_t size) {
    free((void *)data);
}

// Returns a half-resolution copy of sourceImage, box-averaging each 2x2
// block of source pixels into one destination pixel (loses roughly half a
// bit of per-channel precision to the shift-before-add trick below).
//
// Fixes over the original version:
//  * the CFData returned by CGDataProviderCopyData (Copy rule: owned by us)
//    is now released, plugging a leak of the full-size pixel buffer;
//  * the data provider and the intermediate CGImageRef are released;
//  * the malloc'd target buffer is freed via the provider's release
//    callback — the old comment claiming CGImageCreate disposes of it
//    was wrong, so the buffer leaked on every call;
//  * the CGColorSpaceRelease call was removed: CGImageGetColorSpace
//    follows the "Get" rule, so we never owned that color space and
//    releasing it was an over-release.
//
// Only 32-bit, 8-bits-per-component pixel formats are supported (asserted).
- (UIImage*) halveImage:(UIImage*)sourceImage {
    // Compute the target size: half the source in each dimension, truncated.
    CGSize sourceSize = sourceImage.size;
    CGSize targetSize;
    targetSize.width = (int) (sourceSize.width / 2);
    targetSize.height = (int) (sourceSize.height / 2);

    // Access the source data bytes. CGDataProviderCopyData follows the
    // Create/Copy rule: we own sourceDataRef and must release it below.
    CFDataRef sourceDataRef = CGDataProviderCopyData(CGImageGetDataProvider(sourceImage.CGImage));
    NSData* sourceData = (NSData*) sourceDataRef;
    unsigned char* sourceBytes = (unsigned char *)[sourceData bytes];

    // Pixel-format info needed both to read the source and to build the target.
    CGBitmapInfo bitmapInfo = CGImageGetBitmapInfo(sourceImage.CGImage);
    int bitsPerComponent = CGImageGetBitsPerComponent(sourceImage.CGImage);
    int bitsPerPixel = CGImageGetBitsPerPixel(sourceImage.CGImage);
    int __attribute__((unused)) bytesPerPixel = bitsPerPixel / 8; // only used by the (release-stripped) asserts
    int sourceBytesPerRow = CGImageGetBytesPerRow(sourceImage.CGImage);
    // "Get" rule: the image owns this color space — do NOT release it.
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(sourceImage.CGImage);

    assert(bytesPerPixel == 4);
    assert(bitsPerComponent == 8);
    // Bytes per row may be rounded up to an alignment boundary.
    assert(sourceBytesPerRow >= ((int) sourceSize.width) * 4);
    assert([sourceData length] == ((int) sourceSize.height) * sourceBytesPerRow);

    // Allocate the target pixel buffer; keep rows a multiple of 16 bytes.
    int targetBytesPerRow = ((int) targetSize.width) * 4;
    targetBytesPerRow = (targetBytesPerRow + 15) & 0xFFFFFFF0;
    int targetBytesSize = ((int) targetSize.height) * targetBytesPerRow;
    unsigned char* targetBytes = (unsigned char*) malloc(targetBytesSize);
    if (targetBytes == NULL) {
        CFRelease(sourceDataRef);
        return nil;
    }

    // Copy source to target, averaging each 2x2 source block into 1 pixel.
    for (int row = 0; row < targetSize.height; row++) {
        unsigned char* sourceRowStart = sourceBytes + (2 * row * sourceBytesPerRow);
        unsigned char* targetRowStart = targetBytes + (row * targetBytesPerRow);
        for (int column = 0; column < targetSize.width; column++) {
            int sourceColumnOffset = 2 * column * 4;
            int targetColumnOffset = column * 4;
            unsigned char* sourcePixel = sourceRowStart + sourceColumnOffset;
            unsigned char* nextRowSourcePixel = sourcePixel + sourceBytesPerRow;
            unsigned char* targetPixel = targetRowStart + targetColumnOffset;
            uint32_t* sourceWord = (uint32_t*) sourcePixel;
            uint32_t* nextRowSourceWord = (uint32_t*) nextRowSourcePixel;
            uint32_t* targetWord = (uint32_t*) targetPixel;
            uint32_t sourceWord0 = sourceWord[0];
            uint32_t sourceWord1 = sourceWord[1];
            uint32_t sourceWord2 = nextRowSourceWord[0];
            uint32_t sourceWord3 = nextRowSourceWord[1];
            // Divide each byte lane by 4 first (masking off the low 2 bits so
            // lanes can't carry into each other), then add the four pixels:
            // the sum is the per-channel average, at the cost of ~half a bit
            // of resolution.
            sourceWord0 = (sourceWord0 & 0xFCFCFCFC) >> 2;
            sourceWord1 = (sourceWord1 & 0xFCFCFCFC) >> 2;
            sourceWord2 = (sourceWord2 & 0xFCFCFCFC) >> 2;
            sourceWord3 = (sourceWord3 & 0xFCFCFCFC) >> 2;
            uint32_t resultWord = sourceWord0 + sourceWord1 + sourceWord2 + sourceWord3;
            targetWord[0] = resultWord;
        }
    }

    // Wrap the target bytes in a CGImage. The release callback hands buffer
    // ownership to the provider, so targetBytes must not be freed here.
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, targetBytes, targetBytesSize, XYZReleaseHalvedPixels);
    CGImageRef targetRef = CGImageCreate(targetSize.width, targetSize.height, bitsPerComponent, bitsPerPixel, targetBytesPerRow, colorSpace, bitmapInfo, provider, NULL, FALSE, kCGRenderingIntentDefault);
    UIImage* targetImage = [UIImage imageWithCGImage:targetRef];

    // Clean up: release only the objects we own (Create/Copy rule).
    CGImageRelease(targetRef);
    CGDataProviderRelease(provider);
    CFRelease(sourceDataRef);

    return targetImage;
}
我试着每隔一行取一次像素,而不是取平均值,但它得到的图像和默认算法一样糟糕。
https://stackoverflow.com/questions/6052188
复制相似问题