
Translate text without using hooks React Native

I am new to React Native and still exploring it, but I am using it for my school project. How can I translate my predictions array with the react-native-translator package? The only examples I can find use hooks, but I am using a class component. This is how I get the predictions array output:

<View style={styles.predictionWrapper}>
  {isModelReady && image && (
    <Text style={styles.text}>
      Predictions: {predictions ? '' : 'Predicting...'}
    </Text>
  )}
  {isModelReady && predictions && predictions.map(p => this.renderPrediction(p))}
</View>

Now I want to show the translation below it, something like this:

<View>
  <Translator
    from="en"
    to="ms"
    value={isModelReady && predictions && predictions.map(p => this.renderPrediction(p))}
    onTranslated={}
  />
  <Text>{result}</Text>
</View>
);

However, I am not sure how to do this with a class component. I really hope someone can give me an idea of how to go about it. Here is my full code:

class Home extends React.Component {
  state = {
    isTfReady: false,
    isModelReady: false,
    predictions: null,
    image: null
  }

  async componentDidMount() {
    await tf.ready()
    this.setState({ isTfReady: true })
    this.model = await mobilenet.load()
    this.setState({ isModelReady: true })

    //Output in Expo console
    console.log(this.state.isTfReady)
    this.getPermissionAsync()
  }

  getPermissionAsync = async () => {
    if (Constants.platform.ios) {
      const { status } = await Permissions.askAsync(Permissions.CAMERA_ROLL)
      if (status !== 'granted') {
        alert('Sorry, we need camera roll permissions to make this work!')
      }
    }
  }

  imageToTensor(rawImageData) {
    const TO_UINT8ARRAY = true
    const { width, height, data } = jpeg.decode(rawImageData, TO_UINT8ARRAY)
    // Drop the alpha channel info for mobilenet
    const buffer = new Uint8Array(width * height * 3)
    let offset = 0 // offset into original data
    for (let i = 0; i < buffer.length; i += 3) {
      buffer[i] = data[offset]
      buffer[i + 1] = data[offset + 1]
      buffer[i + 2] = data[offset + 2]

      offset += 4
    }

    return tf.tensor3d(buffer, [height, width, 3])
  }

  classifyImage = async () => {
    try {
      const imageAssetPath = Image.resolveAssetSource(this.state.image)
      const response = await fetch(imageAssetPath.uri, {}, { isBinary: true })
      const rawImageData = await response.arrayBuffer()
      const imageTensor = this.imageToTensor(rawImageData)
      const predictions = await this.model.classify(imageTensor)
      this.setState({ predictions })
      console.log(predictions)
    } catch (error) {
      console.log(error)
    }
  }

  selectImage = async () => {
    try {
      let response = await ImagePicker.launchImageLibraryAsync({
        mediaTypes: ImagePicker.MediaTypeOptions.All,
        allowsEditing: true,
        aspect: [4, 3]
      })

      if (!response.canceled) {
        const source = { uri: response.uri }
        this.setState({ image: source })
        this.classifyImage()
      }
    } catch (error) {
      console.log(error)
    }
  }

  renderPrediction = prediction => {
    return (
      <Text key={prediction.className} style={styles.text}>
        {prediction.className}
      </Text>
    )
  }

  translateTo() {
    const [value, setValue] = useState('renderPrediction');
    const [result, setResult] = useState('');
    return (
      <View>
        <Translator
          from="en"
          to="fr"
          value={value}
          onTranslated={(t) => setResult(t)}
        />
        <TextInput value={value} onChangeText={(t) => setValue(t)} />
        <Text>{result}</Text>
      </View>
    );
  }

  // naviScreen = () => {
  //   const navigation = useNavigation();
  //   navigation.navigate('ScanScreen');
  // }

  render() {
    const { isTfReady, isModelReady, predictions, image } = this.state

    return (
      <View style={styles.container}>
        <StatusBar barStyle='light-content' />
        <View style={styles.loadingContainer}>
          <View style={styles.loadingModelContainer}>
            {isTfReady && isModelReady ? (
              <Text style={styles.text}> APP IS READY FOR SCANNING! </Text>
            ) : (
              <ActivityIndicator size='small' />
            )}
          </View>
        </View>

        <TouchableOpacity
          style={styles.imageWrapper}
          onPress={isModelReady ? this.selectImage : undefined}
        >
          {image && <Image source={image} style={styles.imageContainer} />}

          {isModelReady && !image && (
            <Text style={styles.transparentText}>Tap to choose image</Text>
          )}
        </TouchableOpacity>

        <View style={styles.predictionWrapper}>
          {isModelReady && image && (
            <Text style={styles.text}>
              Predictions: {predictions ? '' : 'Predicting...'}
            </Text>
          )}
          {isModelReady && predictions && predictions.map(p => this.renderPrediction(p))}
        </View>

        <View style={styles.bottom}>
          <Button style={styles.text} title='SAVE THIS IMAGE'></Button>
        </View>

        <Button title='LIVE Scan Now' onPress={() => this.props.navigation.navigate('ScanScreen')}></Button>
      </View>
    )
  }
}

Refactored translation code for the class-based component:

class Home extends React.Component {
  state = {
    isTfReady: false,
    isModelReady: false,
    predictions: null,
    image: null,
    textToTranslate: "",
    translationResult: "",
  };

  async componentDidMount() {
    await tf.ready();
    this.setState({
      isTfReady: true,
    });
    this.model = await mobilenet.load();
    this.setState({ isModelReady: true });

    //Output in Expo console
    console.log(this.state.isTfReady);
    this.getPermissionAsync();
  }

  getPermissionAsync = async () => {
    if (Constants.platform.ios) {
      const { status } = await Permissions.askAsync(Permissions.CAMERA_ROLL);
      if (status !== "granted") {
        alert("Sorry, we need camera roll permissions to make this work!");
      }
    }
  };

  imageToTensor(rawImageData) {
    const TO_UINT8ARRAY = true;
    const { width, height, data } = jpeg.decode(rawImageData, TO_UINT8ARRAY);
    // Drop the alpha channel info for mobilenet
    const buffer = new Uint8Array(width * height * 3);
    let offset = 0; // offset into original data
    for (let i = 0; i < buffer.length; i += 3) {
      buffer[i] = data[offset];
      buffer[i + 1] = data[offset + 1];
      buffer[i + 2] = data[offset + 2];

      offset += 4;
    }

    return tf.tensor3d(buffer, [height, width, 3]);
  }

  classifyImage = async () => {
    try {
      const imageAssetPath = Image.resolveAssetSource(this.state.image);
      const response = await fetch(imageAssetPath.uri, {}, { isBinary: true });
      const rawImageData = await response.arrayBuffer();
      const imageTensor = this.imageToTensor(rawImageData);
      const predictions = await this.model.classify(imageTensor);
      this.setState({ predictions });
      console.log(predictions);
    } catch (error) {
      console.log(error);
    }
  };

  selectImage = async () => {
    try {
      let response = await ImagePicker.launchImageLibraryAsync({
        mediaTypes: ImagePicker.MediaTypeOptions.All,
        allowsEditing: true,
        aspect: [4, 3],
      });

      if (!response.canceled) {
        const source = { uri: response.uri };
        this.setState({ image: source });
        this.classifyImage();
      }
    } catch (error) {
      console.log(error);
    }
  };
  renderPrediction = (prediction) => {
    return (
      <Text key={prediction.className} style={styles.text}>
        {" "}
        {prediction.className}{" "}
      </Text>
    );
  };

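  // Translation without hooks: the text to translate and the translated
  // result live in this.state instead of useState, so <Translator> can be
  // driven entirely from a class component.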
  renderTranslation = () => {
    return (
      <View>
        <Translator
          from="en"
          to="fr"
          value={this.state.textToTranslate}
          onTranslated={(text) => this.setState({ translationResult: text })}
        />
        <TextInput
          value={this.state.textToTranslate}
          onChangeText={(text) => this.setState({ textToTranslate: text })}
        />
        <Text>{this.state.translationResult}</Text>
      </View>
    );
  };
  // naviScreen = () => {
  //   const navigation = useNavigation();
  //   navigation.navigate('ScanScreen');
  // }

  render() {
    const { isTfReady, isModelReady, predictions, image } = this.state;

    return (
      <View style={styles.container}>
        <StatusBar barStyle="light-content" />
        {this.renderTranslation()}
        <View style={styles.loadingContainer}>
          <View style={styles.loadingModelContainer}>
            {isTfReady && isModelReady ? (
              <Text style={styles.text}> APP IS READY FOR SCANNING! </Text>
            ) : (
              <ActivityIndicator size="small" />
            )}
          </View>
        </View>

        <TouchableOpacity
          style={styles.imageWrapper}
          onPress={isModelReady ? this.selectImage : undefined}
        >
          {image && <Image source={image} style={styles.imageContainer} />}

          {isModelReady && !image && (
            <Text style={styles.transparentText}>Tap to choose image</Text>
          )}
        </TouchableOpacity>

        <View style={styles.predictionWrapper}>
          {isModelReady && image && (
            <Text style={styles.text}>
              Predictions: {predictions ? "" : "Predicting..."}
            </Text>
          )}
          {isModelReady &&
            predictions &&
            predictions.map((p) => this.renderPrediction(p))}
        </View>

        <View style={styles.bottom}>
          <Button style={styles.text} title="SAVE THIS IMAGE"></Button>
        </View>

        <Button
          title="LIVE Scan Now"
          onPress={() => this.props.navigation.navigate("ScanScreen")}
        ></Button>
      </View>
    );
  }
}
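To translate the predictions themselves rather than typed text, one option is to push the prediction labels into textToTranslate once classification finishes. The snippet below is a minimal sketch, not a tested implementation: it assumes the Translator component re-translates whenever its value prop changes, and that each prediction returned by mobilenet's classify() has a className field, as the existing renderPrediction already relies on.

classifyImage = async () => {
  try {
    const imageAssetPath = Image.resolveAssetSource(this.state.image);
    const response = await fetch(imageAssetPath.uri, {}, { isBinary: true });
    const rawImageData = await response.arrayBuffer();
    const imageTensor = this.imageToTensor(rawImageData);
    const predictions = await this.model.classify(imageTensor);

    // Join the predicted labels into one string and hand it to the
    // <Translator> through state; no hooks are needed because the
    // component only reads plain props. (Sketch only, untested.)
    this.setState({
      predictions,
      textToTranslate: predictions.map((p) => p.className).join(", "),
    });
  } catch (error) {
    console.log(error);
  }
};

With from="en" and to="ms" on the Translator inside renderTranslation, the translated labels then appear in Malay below the prediction list.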

