I am trying to use Google's ML Kit on Android to detect digits in a live CameraX preview.
However, I cannot seem to find any tutorial that is not based on Kotlin or that does not use the now-deprecated Firebase ML Kit.
My code looks like this:
public class MainActivity extends AppCompatActivity {

    private final int REQUEST_CODE_PERMISSIONS = 10;
    private static final String TAG = "CameraXApp";
    private String[] REQUIRED_PERMISSIONS = new String[] {Manifest.permission.CAMERA};

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        viewFinder = findViewById(R.id.view_finder);

        // Request camera permissions
        if (allPermissionsGranted()) {
            viewFinder.post(startCamera);
        } else {
            ActivityCompat.requestPermissions(
                    this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS);
        }

        // Every time the provided texture view changes, recompute the layout
        viewFinder.addOnLayoutChangeListener(new View.OnLayoutChangeListener() {
            @Override
            public void onLayoutChange(
                    View v, int left, int top, int right, int bottom,
                    int oldLeft, int oldTop, int oldRight, int oldBottom) {
                updateTransform();
            }
        });
    }
    private TextureView viewFinder;

    private final Runnable startCamera = new Runnable() {
        @Override
        public void run() {
            // Create configuration object for the viewfinder use case
            PreviewConfig previewConfig = new PreviewConfig.Builder().build();

            // Build the viewfinder use case
            Preview preview = new Preview(previewConfig);

            // Every time the viewfinder is updated, recompute the layout
            preview.setOnPreviewOutputUpdateListener(
                    previewOutput -> {
                        // To update the SurfaceTexture, we have to remove it and re-add it
                        ViewGroup parent = (ViewGroup) viewFinder.getParent();
                        parent.removeView(viewFinder);
                        parent.addView(viewFinder, 0);
                        viewFinder.setSurfaceTexture(previewOutput.getSurfaceTexture());
                        updateTransform();
                    });
            // Create configuration object for the image capture use case
            ImageCaptureConfig imageCaptureConfig = new ImageCaptureConfig.Builder()
                    .setTargetAspectRatio(new Rational(1, 1))
                    // We don't set a resolution for image capture; instead, we
                    // select a capture mode which will infer the appropriate
                    // resolution based on aspect ratio and requested mode
                    .setCaptureMode(ImageCapture.CaptureMode.MIN_LATENCY)
                    .build();

            // Build the image capture use case and attach a button click listener
            ImageCapture imageCapture = new ImageCapture(imageCaptureConfig);
            findViewById(R.id.capture_button).setOnClickListener(view -> {
                File file = new File(getExternalMediaDirs()[0],
                        System.currentTimeMillis() + ".jpg");
                imageCapture.takePicture(new ImageCapture.OnImageCapturedListener() {
                    @Override
                    public void onCaptureSuccess(ImageProxy image, int rotationDegrees) {
                        super.onCaptureSuccess(image, rotationDegrees);
                    }
                });
            });
            // Set up the image analysis pipeline that feeds preview frames
            // to the ML Kit text recognizer
            HandlerThread analyzerThread = new HandlerThread("YourAnalyzer");
            analyzerThread.start();
            ImageAnalysisConfig analyzerConfig =
                    new ImageAnalysisConfig.Builder()
                            .setCallbackHandler(new Handler(analyzerThread.getLooper()))
                            // In our analysis, we care more about the latest image than
                            // analyzing *every* image
                            .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
                            .build();
            ImageAnalysis analyzerUseCase = new ImageAnalysis(analyzerConfig);
            analyzerUseCase.setAnalyzer(new YourAnalyzer());

            // Bind the use cases to the activity's lifecycle
            CameraX.bindToLifecycle((LifecycleOwner) MainActivity.this,
                    preview, imageCapture, analyzerUseCase);
        }
    };
    private void updateTransform() {
        Matrix matrix = new Matrix();
        float centerX = viewFinder.getWidth() / 2f;
        float centerY = viewFinder.getHeight() / 2f;

        // Correct preview output to account for display rotation
        float rotationDegrees;
        switch (viewFinder.getDisplay().getRotation()) {
            case Surface.ROTATION_0:
                rotationDegrees = 0f;
                break;
            case Surface.ROTATION_90:
                rotationDegrees = 90f;
                break;
            case Surface.ROTATION_180:
                rotationDegrees = 180f;
                break;
            case Surface.ROTATION_270:
                rotationDegrees = 270f;
                break;
            default:
                return;
        }
        matrix.postRotate(-rotationDegrees, centerX, centerY);

        // Finally, apply the transformation to our TextureView
        viewFinder.setTransform(matrix);
    }
    /**
     * Processes the result of the permission request dialog: if the request
     * was granted, start the camera; otherwise display a toast and finish.
     */
    @Override
    public void onRequestPermissionsResult(
            int requestCode, String[] permissions, int[] grantResults) {
        if (requestCode == REQUEST_CODE_PERMISSIONS) {
            if (allPermissionsGranted()) {
                viewFinder.post(startCamera);
            } else {
                Toast.makeText(this,
                        "Permissions not granted by the user.",
                        Toast.LENGTH_SHORT).show();
                finish();
            }
        }
    }

    private boolean allPermissionsGranted() {
        for (String permission : REQUIRED_PERMISSIONS) {
            if (ContextCompat.checkSelfPermission(getBaseContext(), permission) !=
                    PackageManager.PERMISSION_GRANTED) {
                return false;
            }
        }
        return true;
    }
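
    // Utility: copies the remaining contents of a ByteBuffer into a byte[]
    // (not currently called anywhere in this class).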
    private static byte[] toByteArray(ByteBuffer buffer) {
        buffer.rewind();
        byte[] data = new byte[buffer.remaining()];
        buffer.get(data);
        return data;
    }
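
    // Analyzer that wraps each camera frame in an ML Kit InputImage and
    // runs the on-device text recognizer on it.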
    private class YourAnalyzer implements ImageAnalysis.Analyzer {
        @Override
        public void analyze(ImageProxy image, int rotationDegrees) {
            Image mediaImage = image.getImage();
            if (mediaImage != null) {
                InputImage inputImage = InputImage.fromMediaImage(mediaImage, rotationDegrees);
                TextRecognizer recognizer = TextRecognition.getClient();
                Task<Text> result =
                        recognizer.process(inputImage)
                                .addOnSuccessListener(new OnSuccessListener<Text>() {
                                    @Override
                                    public void onSuccess(Text visionText) {
                                        // Task completed successfully
                                        Toast.makeText(MainActivity.this, "SUCCESS",
                                                Toast.LENGTH_SHORT).show();
                                        processTextBlock(visionText);
                                    }
                                })
                                .addOnFailureListener(new OnFailureListener() {
                                    @Override
                                    public void onFailure(@NonNull Exception e) {
                                        // Task failed with an exception
                                        //Toast.makeText(MainActivity.this, "FAILED", Toast.LENGTH_SHORT).show();
                                    }
                                });
            }
        }
    }
    private void processTextBlock(Text result) {
        // [START mlkit_process_text_block]
        String resultText = result.getText();
        for (Text.TextBlock block : result.getTextBlocks()) {
            String blockText = block.getText();
            Point[] blockCornerPoints = block.getCornerPoints();
            Rect blockFrame = block.getBoundingBox();
            for (Text.Line line : block.getLines()) {
                String lineText = line.getText();
                Point[] lineCornerPoints = line.getCornerPoints();
                Rect lineFrame = line.getBoundingBox();
                for (Text.Element element : line.getElements()) {
                    String elementText = element.getText();
                    Point[] elementCornerPoints = element.getCornerPoints();
                    Rect elementFrame = element.getBoundingBox();
                }
            }
        }
        // [END mlkit_process_text_block]
    }
}
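For context, once onSuccess fires I plan to pull the digits out of the recognized text roughly along these lines (just a sketch; the helper name and regex are placeholders of mine, not something from the ML Kit docs):

    // Sketch: collect every run of digits from the recognized text.
    // Requires java.util.regex.Pattern/Matcher and java.util.List/ArrayList.
    private List<String> extractDigits(Text visionText) {
        List<String> digits = new ArrayList<>();
        Matcher matcher = Pattern.compile("\\d+").matcher(visionText.getText());
        while (matcher.find()) {
            digits.add(matcher.group());
        }
        return digits;
    }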
Unfortunately, the OnFailureListener works perfectly (it fires every time), but I have never gotten the OnSuccessListener to be called. Does anyone know what I am missing? Any help would be greatly appreciated.

1: https://developers.google.com/ml-kit/vision/text-recognition/android#3.-process-the-image
2: https://developer.android.com/training/camerax
3: https://firebase.google.com/docs/ml-kit/recognize-text
Posted on 2021-03-31 02:10:57
https://stackoverflow.com/questions/66822042