我正在尝试使用 CIDetector(Core Image 检测器)获取图像上的文本区域。
// Detects text regions in the given image with Core Image's CIDetector.
// Returns an array of CITextFeature whose bounds are expressed in the
// (vertically flipped) CIImage coordinate space.
// NOTE(review): returns nil features only when img / img.CGImage is nil —
// per the accepted answer below, that is the likely failure mode to check.
- (NSArray *)detectWithImage:(UIImage *)img
{
// prepare CIImage
CIImage *image = [CIImage imageWithCGImage:img.CGImage];
// flip vertically: translate by the image height, then scale Y by -1.
// Core Image uses a bottom-left origin; this flip is optional for detection
// (the answer notes it is not required).
CIFilter *filter = [CIFilter filterWithName:@"CIAffineTransform"];
[filter setValue:image forKey:kCIInputImageKey];
CGAffineTransform t = CGAffineTransformMakeTranslation(0, CGRectGetHeight(image.extent));
t = CGAffineTransformScale(t, 1.0, -1.0);
[filter setValue:[NSValue valueWithCGAffineTransform:t] forKey:kCIInputTransformKey];
image = filter.outputImage;
// prepare CIDetector (nil context lets Core Image create a default one)
CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeText
context:nil
options:@{
CIDetectorAccuracy: CIDetectorAccuracyHigh}];
// retrieve array of CITextFeature (sub-features = per-character boxes)
NSArray *features = [detector featuresInImage:image
options:@{CIDetectorReturnSubFeatures: @YES}];
return features;
}

传入的图片为:

我没有从这张图片中检测到任何结果。我也尝试过彩色图像,以及不翻转图像的版本,都没有效果。
有人能告诉我正确的方向吗?
谢谢!
发布于 2018-03-13 02:58:09
您应该检查以确保传递给您的函数的UIImage和img.CGImage不是nil,因为您的其余代码似乎没有问题,尽管翻转并不是必需的。例如:
// Build an image view and overlay a translucent red box over every text
// feature CIDetector finds. Fixes in this snippet vs. the original answer:
//  - `fear.bounds` was an undeclared-identifier typo for `feature.bounds`
//  - `image.size.height` was invalid: `image` is a CIImage, which has no
//    `size` property; the flip must use the UIImage height (img.size.height)
//  - CGRect has no direct `.height`/`.width` members; use `.size.height`
//    and `.size.width`
// NOTE(review): CITextFeature bounds are in pixel coordinates of the CIImage;
// this assumes a scale-1 image where points == pixels — confirm for @2x/@3x.
UIImageView *imageView = [[UIImageView alloc] initWithImage:img];
CIImage *image = [CIImage imageWithCGImage:img.CGImage];
CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeText
                                          context:nil
                                          options:@{CIDetectorAccuracy: CIDetectorAccuracyHigh}];
// retrieve array of CITextFeature
NSArray *features = [detector featuresInImage:image
                                      options:@{CIDetectorReturnSubFeatures: @YES}];
for (CITextFeature *feature in features) {
    // Core Image uses a bottom-left origin; convert to UIKit's top-left
    // origin by flipping the y coordinate against the image height.
    CGRect bounds = feature.bounds;
    CGRect frame = CGRectMake(bounds.origin.x,
                              img.size.height - bounds.origin.y - bounds.size.height,
                              bounds.size.width,
                              bounds.size.height);
    UIView *view = [[UIView alloc] initWithFrame:frame];
    view.backgroundColor = [[UIColor redColor] colorWithAlphaComponent:0.25];
    [imageView addSubview:view];
}

生成结果:

其中红色突出显示表示从CIDetector返回的边界
https://stackoverflow.com/questions/49241949
复制相似问题