
A method or delegate does not match other delegate parameters

I am creating an AI system for the Google Science Fair, but I have run into trouble and can't seem to find the error behind the problem described in the title. Google searches on this topic came back empty. I am using MonoDevelop.

Here is my code:

using UnityEngine;
using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.ComponentModel.Design;

namespace NeuralNetwork {
public class DecisionMaking {

    public static string DecideString(List<string> choice){
        choice.ToArray ();
        int mychoice = UnityEngine.Random.Range (0, choice.Count);
        return choice [mychoice];
    }

    public static Vector3 DecideVector(List<Vector3> choice){
        choice.ToArray ();
        int mychoice = UnityEngine.Random.Range (0, choice.Count);
        return choice [mychoice];
    }

}

public class Network {

    private int num_in;
    private int num_hid;
    private int num_out;

    private double[,] i_to_h_wts;
    private double[,] h_to_o_wts;
    private double[] inputs;
    private double[] hidden;
    private double[] outputs;
    private double learningRate = 0.3;

    private System.Random gen = new System.Random();

    public static bool isTraining;
    public delegate void ChangeHandler (System.Object sender, EventArgs nne);
    public event ChangeHandler Change;

    #region Constructor

    public Network (int num_in,int num_hid, int num_out){
        this.num_in = num_in;
        this.num_hid = num_hid;
        this.num_out = num_out;

        i_to_h_wts = new double[num_in + 1, num_hid];
        h_to_o_wts = new double[num_hid + 1, num_out];

        inputs = new double[num_in + 1];
        hidden = new double[num_hid + 1];
        outputs = new double[num_out];
    }

    #endregion

    public void initializeNetwork() {
        inputs [num_in] = 1.0;
        inputs [num_hid] = 1.0;

        for (int i = 0; i < num_in + 1; i++) {
            for (int j = 0; j < num_hid; j++) {
                i_to_h_wts [i, j] = (gen.NextDouble () * 4) - 2;
            }
        }

        for (int i = 0; i < num_hid + 1; i++) {
            for (int j = 0; j < num_out; j++) {
                h_to_o_wts [i, j] = (gen.NextDouble () * 4) - 2;
            }
        }
    }

    public virtual void On_Change(NeuralNetworkEventArgs nne) {
        if (Change != null) {
            Change (this, nne);
        }
    }

    public void pass_forward(double[] applied_inputs, double[] targOuts) {
        for (int i = 0; i < num_in; i++) {
            inputs [i] = applied_inputs [i];
        }
        for (int i = 0; i < num_hid; i++) {
            double sum = 0.0;
            for (int j = 0; j < num_in + 1; j++) {
                sum += inputs [j] * i_to_h_wts [j, i];
            }
            hidden [i] = SigmoidActivationFunction.processValue (sum);
        }
        for (int i = 0; i < num_out; i++) {
            double sum = 0.0;
            for (int j = 0; j < num_hid + 1; j++) {
                sum += hidden [j] * h_to_o_wts [j, i];
            }
            outputs [i] = SigmoidActivationFunction.processValue (sum);
            NeuralNetworkEventArgs nne = new NeuralNetworkEventArgs (outputs, targOuts);
            On_Change (nne);
        }
    }

    #region Public Properties / Methods

    /// <summary>
    /// gets / sets the number of input nodes for the Neural Network
    /// </summary>
    public int NumberOfInputs
    {
        get { return num_in; }
        set { num_in = value; }
    }

    /// <summary>
    /// gets / sets the number of hidden nodes for the Neural Network
    /// </summary>
    public int NumberOfHidden
    {
        get { return num_hid; }
        set { num_hid = value; }
    }

    /// <summary>
    /// gets / sets the number of output nodes for the Neural Network
    /// </summary>
    public int NumberOfOutputs
    {
        get { return num_out; }
        set { num_out = value; }
    }

    /// <summary>
    /// gets / sets the input to hidden weights for the Neural Network
    /// </summary>
    public double[,] InputToHiddenWeights
    {
        get { return i_to_h_wts; }
        set { i_to_h_wts = value; }
    }

    /// <summary>
    /// gets / sets the hidden to output weights for the Neural Network
    /// </summary>
    public double[,] HiddenToOutputWeights
    {
        get { return h_to_o_wts; }
        set { h_to_o_wts = value; }
    }

    /// <summary>
    /// gets / sets the input values for the Neural Network
    /// </summary>
    public double[] Inputs
    {
        get { return inputs; }
        set { inputs = value; }
    }

    /// <summary>
    /// gets / sets the hidden values for the Neural Network
    /// </summary>
    public double[] Hidden
    {
        get { return hidden; }
        set { hidden = value; }
    }

    /// <summary>
    /// gets / sets the outputs values for the Neural Network
    /// </summary>
    public double[] Outputs
    {
        get { return outputs; }
        set { outputs = value; }
    }

    /// <summary>
    /// gets / sets the LearningRate (eta) value for the Neural Network
    /// </summary>
    public double LearningRate
    {
        get { return learningRate; }
        set { learningRate = value; }
    }

    /// <summary>
    /// Gets the error for this NeuralNetwork, based on targets - outputs
    /// </summary>
    /// <param name="targets">The target values, can be {0.0,1.0} or {1.0,0.0}</param>
    /// <returns>the total error for this ANN based on targets - outputs</returns>
    public double getError(double[] targets)
    {

        //storage for error
        double error = 0.0;

        //this calculation is based on something I read about weight space in
        //Artificial Intelligence - A Modern Approach, 2nd edition. Prentice Hall
        //2003. Stuart Russell, Peter Norvig. Pg 741
        error = Math.Sqrt(Math.Pow((targets[0] - outputs[0]), 2));
        return error;

    }

    #endregion
}

public class NeuralNetworkEventArgs : EventArgs {
    private double[] targOuts;
    private double[] outputs;

    public NeuralNetworkEventArgs(double[] outputs,double[] targOuts){
        this.targOuts = targOuts;
        this.outputs = outputs;
    }

    public double[] TargetOuts {
        get { return targOuts; }
    }

    public double[] Outputs {
        get { return outputs;}
    }
}

public class SigmoidActivationFunction {
    public static double processValue(double x) {
        return 1.0 / (1.0 + Math.Pow (Math.E, -x));
    }
}

}

And:

using System;
using System.Collections.Generic;
using System.Text;
using NeuralNetwork;

namespace GA_ANN_XOR
{
    #region NN_Trainer_XOR CLASS
    /// <summary>
    /// Provides a GA trainer for a
    /// <see cref="NeuralNetwork">NeuralNetwork</see> class
    /// with 2 inputs, 2 hidden, and 1 output, which is trying
    /// to approximate the XOR problem
    /// </summary>
    public class GA_Trainer_XOR
    {
        #region Instance fields
        private Random gen = new Random(5);
        private int training_times = 10000;
        private double[,] train_set =
        {
            {0, 0},
            {0, 1},
            {1, 0},
            {1, 1}
        };

        //population size
        private int POPULATION = 15;
        //ANN's
        private Network[] networks;
        //Mutation
        private double MUTATION = 0.5;
        //Recombination
        private double RECOMBINE = 0.4;
        //flag to detect when we have found a good ANN
        private bool foundGoodANN = false;
        //current training loop
        private int trainLoop = 0;
        //best configuration index
        private int bestConfiguration = -1;
        //acceptable overall Neural Network error
        private double acceptableNNError = 0.1;
        //events for gui, generated by the GA trainer
        public delegate void GAChangeHandler(Object sender, TrainerEventArgs te);
        public event GAChangeHandler GAChange;
        public event EventHandler GATrainingDone;
        //events for gui, generated by the NeuralNetwork, but propagated up to the gui
        //by the GA trainer; that's why this event is here. The gui knows nothing
        //about the array of NeuralNetworks, so the event must come through the trainer
        public delegate void ChangeHandler(Object sender, NeuralNetworkEventArgs nne);
        public event ChangeHandler NNChange;

        #endregion
        #region Public Properties/Methods


        /// <summary>
        /// Performs a microbial GA (best of last breeding cycle stays in population)
        /// on an array of <see cref="NeuralNetwork"> NeuralNetworks</see> in an attempt
        /// to find a solution to the XOR logic problem. The training presents the entire
        /// training set to a random pair of <see cref="NeuralNetwork"> NeuralNetworks,</see>
        ///  and evaluates which one does best. The winner's genes, plus some mutation, are used
        /// to shape the loser's genes, in the hope that the new population will be moving
        /// towards a closer solution.
        /// </summary>
        /// <param name="training_times">the number of times to carry out the
        /// training loop</param>
        /// <returns>The best <see cref="NeuralNetwork"> NeuralNetworks </see>
        /// configuration found</returns>
        public Network doTraining(int training_times)
        {


            int a = 0;
            int b = 0;
            int WINNER = 0;
            int LOSER = 0;

            #region Training
            //loop for the trainingPeriod
            for (trainLoop = 0; trainLoop < training_times; trainLoop++)
            {
                //fire training loop event
                TrainerEventArgs te = new TrainerEventArgs(trainLoop);
                On_GAChange(te);
                NeuralNetwork.isTraining = true;

                //if the previous evaluation cycle found a good ANN configuration,
                //quit the training cycle; otherwise, let the breeding continue
                if (foundGoodANN)
                {
                    break;
                }

                //pick 2 ANN's at random, GA - SELECTION
                a = (int)(gen.NextDouble() * POPULATION);
                b = (int)(gen.NextDouble() * POPULATION);

                //work out which was the WINNER and LOSER, GA - EVALUATION
                if (evaluate(a) < evaluate(b))
                {
                    WINNER = a;
                    LOSER = b;
                }
                else
                {
                    WINNER = b;
                    LOSER = a;
                }

                ////get the current value of the ANN weights
                double[,] WINNER_i_to_h_wts = networks[WINNER].InputToHiddenWeights;
                double[,] LOSER_i_to_h_wts = networks[LOSER].InputToHiddenWeights;
                double[,] WINNER_h_to_o_wts = networks[WINNER].HiddenToOutputWeights;
                double[,] LOSER_h_to_o_wts = networks[LOSER].HiddenToOutputWeights;

                ////i_to_h_wts RECOMBINATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfInputs + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfHidden; l++)
                    {
                        //get genes from winner randomly for i_to_h_wts weights
                        if (gen.NextDouble() < RECOMBINE)
                        {
                            // set the weights to be that of the input weights from GA
                            LOSER_i_to_h_wts[k,l] = WINNER_i_to_h_wts[k,l];
                        }
                    }
                }

                //h_to_o_wts RECOMBINATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfHidden + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfOutputs; l++)
                    {
                        //get genes from winner randomly for h_to_o_wts weights
                        if (gen.NextDouble() < RECOMBINE)
                        {
                            // set the weights to be that of the input weights from GA
                            LOSER_h_to_o_wts[k,l] = WINNER_h_to_o_wts[k,l];
                        }
                    }
                }

                //i_to_h_wts MUTATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfInputs + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfHidden; l++)
                    {
                        //add some mutation randomly
                        if (gen.NextDouble() < MUTATION)
                        {
                            LOSER_i_to_h_wts[k,l] += ((gen.NextDouble() * 0.2) - 0.1);
                        }
                    }
                }

                //h_to_o_wts MUTATION LOOP
                for (int k = 0; k < networks[WINNER].NumberOfHidden + 1; k++)
                {
                    for (int l = 0; l < networks[WINNER].NumberOfOutputs; l++)
                    {
                        //add some mutation randomly
                        if (gen.NextDouble() < MUTATION)
                        {
                            LOSER_h_to_o_wts[k,l] += ((gen.NextDouble() * 0.2) - 0.1);
                        }
                    }
                }

                //update the loser's i_to_h_wts genotype
                networks[LOSER].InputToHiddenWeights = LOSER_i_to_h_wts;
                //update the loser's h_to_o_wts genotype
                networks[LOSER].HiddenToOutputWeights = LOSER_h_to_o_wts;

            }
            #endregion


            //AT THIS POINT IT'S EITHER THE END OF TRAINING OR WE HAVE
            //FOUND AN ACCEPTABLE ANN, WHOSE ERROR IS BELOW THE ACCEPTABLE VALUE


            //tell gui that training is now done
            On_GATrainingDone(new EventArgs());
            NeuralNetwork.isTraining = false;

            //check to see if there was a best configuration found, may not have done
            //enough training to find a good NeuralNetwork configuration, so will simply
            //have to return the WINNER
            if (bestConfiguration == -1)
            {
                bestConfiguration = WINNER;
            }
            //return the best Neural network
            return networks[bestConfiguration];

        }



        /// <summary>
        /// Is called after the initial training is completed.
        /// Simply presents one complete pass of the training set to
        /// the trained network, which should hopefully get it pretty
        /// much correct now that it's trained
        /// </summary>
        public void doActualRun()
        {
            //loop through the entire training set
            for (int i = 0; i <= train_set.GetUpperBound(0); i++)
            {
                //forward these new values through network
                //forward weights through ANN
                forwardWeights(bestConfiguration, getTrainSet(i));
                double[] targetValues = getTargetValues(getTrainSet(i));
            }
        }


        #endregion
        #region Constructor
        /// <summary>
        /// Constructs a new GA_Trainer_XOR. The constructor creates
        /// the population of <see cref="NeuralNetwork">NeuralNetworks</see>
        ///  and also wires up the underlying <see cref="NeuralNetwork">
        /// NeuralNetworks</see> events, to a new GA event, such that the
        /// <see cref="NeuralNetwork">NeuralNetworks</see> event can be 
        /// propogated to the gui
        /// </summary>
        public GA_Trainer_XOR()
        {
            networks = new Network[POPULATION];

            //create new ANN objects, random weights applied at start
            for (int i = 0; i <= networks.GetUpperBound(0); i++)
            {
                networks[i] = new Network(2, 2, 1);
                networks[i].Change += new Network.ChangeHandler(GA_Trainer_NN_Change);
            }
        }

        #endregion
        #region Events




        /// <summary>
        /// Raises the GA TrainingDone event
        /// </summary>
        /// <param name="ea">The EventArgs</param>
        public virtual void On_GATrainingDone(EventArgs ea)
        {
            if (GATrainingDone != null)
            {
                // Invokes the delegates. 
                GATrainingDone(this, ea);
            }
        }


        /// <summary>
        /// Raises the GA Change event
        /// </summary>
        /// <param name="te">The TrainerEventArgs</param>
        public virtual void On_GAChange(TrainerEventArgs te)
        {
            if (GAChange != null)
            {
                // Invokes the delegates. 
                GAChange(this, te);
            }
        }

        /// <summary>
        /// Raises the NeuralNetwork Change event, simply propagates the
        /// original <see cref="NeuralNetwork">NeuralNetwork</see>
        /// event up to the GUI
        /// </summary>
        /// <param name="nne">The NeuralNetworkEventArgs</param>
        public virtual void On_NNChange(NeuralNetworkEventArgs nne)
        {
            if (NNChange != null)
            {
                // Invokes the delegates. 
                NNChange(this, nne);
            }
        }
        #endregion
        #region Private Methods

        /// <summary>
        /// Evaluates a member of the population (of <see cref="NeuralNetwork">
        /// NeuralNetworks</see>)
        /// </summary>
        /// <param name="popMember">The member of the population to evaluate</param>
        /// <returns>An overall error value for this population member, which is
        /// the result of applying the complete training set to the population
        /// member, with its current weight configuration</returns>
        private double evaluate(int popMember)
        {

            double error = 0.0;

            //loop through the entire training set
            for (int i = 0; i <= train_set.GetUpperBound(0); i++)
            {
                //forward these new values through network
                //forward weights through ANN
                forwardWeights(popMember, getTrainSet(i));
                double[] targetValues = getTargetValues(getTrainSet(i));
                error += networks[popMember].getError(targetValues);

            }
            //if the Error term is < acceptableNNError value we have found
            //a good configuration of weights for the NeuralNetwork, so tell
            //GA to stop looking
            if (error < acceptableNNError)
            {
                bestConfiguration = popMember;
                foundGoodANN = true;
            }

            //return error
            return error;
        }


        /// <summary>
        /// This event handler is simply here to propagate the underlying
        /// <see cref="NeuralNetwork">NeuralNetworks</see> Change
        /// event to the gui. The gui has no visibility of the
        /// array of <see cref="NeuralNetwork">NeuralNetworks</see>,
        /// so this trainer class propagates the events from the
        /// <see cref="NeuralNetwork">NeuralNetworks</see> to the gui
        /// </summary>
        /// <param name="sender">The original <see cref="NeuralNetwork">NeuralNetwork</see>
        /// that changed</param>
        /// <param name="nne">The NeuralNetworkEventArgs</param>
        private void GA_Trainer_NN_Change(object sender, NeuralNetworkEventArgs nne)
        {
            On_NNChange(nne);
        }

        /// <summary>
        /// Returns the array within the 2D train_set array at the index
        /// specified by the idx input parameter
        /// </summary>
        /// <param name="idx">The index into the 2d array to get</param>
        /// <returns>The array within the 2D train_set array at the index
        /// specified by the idx input parameter</returns>
        private double[] getTrainSet(int idx)
        {
            //NOTE :
            //
            //If anyone can tell me how to return an array at index idx from
            //a 2D array, which is holding arrays of arrays I would like that
            //very much.
            //I thought it would be
            //double[] trainValues= (double[])train_set.GetValue(0);
            //but this didn't work, so am doing it like this

            double[] trainValues = { train_set[idx, 0], train_set[idx, 1] };
            return trainValues;
        }


        /// <summary>
        /// Forwards the weights from the input->hidden and also from
        /// the hidden->output nodes, for the trainingSet
        /// </summary>
        /// <param name="popMember">The population member</param>
        /// <param name="trainingSet">The training set to present to the 
        /// <see cref="NeuralNetwork"/>NeuralNetwork</param>
        private void forwardWeights(int popMember, double[] trainingSet)
        {
            //forward weights through ANN
            networks[popMember].pass_forward(trainingSet,getTargetValues(trainingSet));
        }

        /// <summary>
        /// Returns a double array which represents the target output for the
        /// current set of inputs.
        /// In the cases where the summed inputs = 1, then the target
        /// should be 1.0, otherwise it should be 0.0.
        /// This is only for the XOR problem, but this is a trainer
        /// for the XOR problem, so this is fine.
        /// </summary>
        /// <param name="currSet">The current set of inputs</param>
        /// <returns>A double array which represents the target output for the
        /// current set of inputs</returns>
        private double[] getTargetValues(double[] currSet)
        {
            //the current value of the training set
            double valOfSet = 0;
            double[] targs = new double[1];
            for (int i = 0; i < currSet.Length; i++)
            {
                valOfSet += currSet[i];
            }
            //in the cases where the summed inputs = 1, then target
            //should be 1.0, otherwise it should be 0.0
            targs[0] = valOfSet == 1 ? 1.0 : 0.0;
            return targs;
        }
        #endregion
    }
    #endregion
    #region TrainerEventArgs CLASS
    /// <summary>
    /// Provides the event arguments for the
    /// <see cref="GA_Trainer_XOR">trainer</see> class
    /// </summary>
    public class TrainerEventArgs : EventArgs
    {
        #region Instance Fields
        //Instance fields
        private int trainLoop = 0;

        #endregion
        #region Public Constructor

        /// <summary>
        /// Constructs a new TrainerEventArgs object using the parameters provided
        /// </summary>
        /// <param name="trainLoop">The current training loop</param>
        public TrainerEventArgs(int trainLoop)
        {
            this.trainLoop = trainLoop;
        }
        #endregion
        #region Public Methods/Properties

        /// <summary>
        /// gets the training loop number
        /// </summary>
        public int TrainingLoop
        {
            get { return trainLoop; }
        }
        #endregion

    }
    #endregion
}

Here is the error:

Assets/Scripts/System/NeuralNetwork/Trainer.cs(238,102): error CS0123: A method or delegate `GA_ANN_XOR.GA_Trainer_XOR.GA_Trainer_NN_Change(object, NeuralNetwork.NeuralNetworkEventArgs)' parameters do not match delegate `NeuralNetwork.Network.ChangeHandler(object, System.EventArgs)' parameters
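
The compiler rejects the Change += new Network.ChangeHandler(GA_Trainer_NN_Change) subscription because a method group only converts to a delegate type when each of its parameters is the same type as, or a base type of, the corresponding delegate parameter. GA_Trainer_NN_Change takes the more derived NeuralNetworkEventArgs, while Network.ChangeHandler promises to pass any EventArgs, so the conversion fails with CS0123. A minimal, self-contained sketch of the direction that is allowed (illustrative names, not taken from the project):

using System;

// Illustrative types only -- not part of the project above.
class DerivedArgs : EventArgs { }

delegate void DerivedHandler(object sender, DerivedArgs e);

static class DelegateVarianceDemo
{
    // The parameter is LESS derived than the delegate's, so the method can
    // handle anything the delegate could ever be invoked with: allowed.
    static void OnAnything(object sender, EventArgs e)
    {
        Console.WriteLine("handled {0}", e.GetType().Name);
    }

    static void Main()
    {
        DerivedHandler ok = OnAnything;   // fine: contravariant parameter
        ok(null, new DerivedArgs());

        // The posted code attempts the opposite direction: a delegate whose
        // parameter is EventArgs assigned a method taking NeuralNetworkEventArgs.
        // The handler could then be handed an EventArgs it cannot treat as the
        // derived type, so the compiler reports CS0123.
    }
}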

A simple fix is to change this:

public delegate void ChangeHandler (System.Object sender, EventArgs nne);

to this:

public delegate void ChangeHandler (System.Object sender, NeuralNetworkEventArgs nne);

That solves the problem.
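
As an alternative to declaring a custom delegate, the built-in generic EventHandler<TEventArgs> delegate can be used instead, which keeps the event and the existing handler signature in agreement automatically. A minimal, self-contained sketch with simplified stand-ins for the posted classes (the class bodies here are illustrative, not the full implementations above):

using System;

// Simplified stand-ins for the posted NeuralNetworkEventArgs and Network classes.
class NeuralNetworkEventArgs : EventArgs { }

class Network
{
    // Built-in delegate with the shape: void (object sender, NeuralNetworkEventArgs e)
    public event EventHandler<NeuralNetworkEventArgs> Change;

    public void On_Change(NeuralNetworkEventArgs nne)
    {
        EventHandler<NeuralNetworkEventArgs> handler = Change;
        if (handler != null)
        {
            handler(this, nne);   // same null-check-then-invoke pattern as the posted code
        }
    }
}

static class TrainerDemo
{
    static void Main()
    {
        Network network = new Network();

        // Subscribes with the same signature GA_Trainer_NN_Change already has:
        // (object sender, NeuralNetworkEventArgs nne)
        network.Change += Network_Change;
        network.On_Change(new NeuralNetworkEventArgs());
    }

    static void Network_Change(object sender, NeuralNetworkEventArgs nne)
    {
        Console.WriteLine("Change event received");
    }
}

Either way, the delegate type and every subscribed handler have to agree on the EventArgs-derived parameter type.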

