Decimal to Fraction conversion in Swift

后端 未结 2 909
醉酒成梦
醉酒成梦 2020-11-30 09:14

I am building a calculator and want it to automatically convert every decimal into a fraction. So if the user calculates an expression for which the answer is "0.333333...", the calculator should display the fraction "1/3" instead.

相关标签:
2条回答
  • 2020-11-30 09:47

    As Martin R said, the only way to have (99.99%) exact calculations is to calculate everything with rational numbers, from beginning to end.

    The reason behind the creation of this class was also the fact that I needed very accurate calculations, and that was not possible with the Swift-provided types, so I created my own type.

    here is the code, i'll explain it below.

    class Rational {

        // Numerator and denominator, always stored in lowest terms.
        // The sign lives on `alpha`; `beta` is kept positive.
        var alpha = 0
        var beta = 0

        /// Creates the rational a/b, reduced to lowest terms, with the
        /// sign normalised onto the numerator.
        init(_ a: Int, _ b: Int) {
            if (a > 0 && b > 0) || (a < 0 && b < 0) {
                simplifier(a, b, "+")
            }
            else {
                simplifier(a, b, "-")
            }
        }

        /// Creates a rational from a double. `accuracy` is the number of
        /// digits to keep after the decimal point; -1 means "use the full
        /// stored value of the double".
        init(_ double: Double, accuracy: Int = -1) {
            exponent(double, accuracy)
        }

        /// Converts `double` to numerator/denominator form in which the
        /// denominator is a power of ten, then stores the reduced fraction.
        func exponent(_ double: Double, _ accuracy: Int) {
            var exp = 1
            var double = double

            if accuracy >= 0 {
                // Round to the requested number of decimal places
                // arithmetically instead of formatting through NSString
                // and force-unwrapping the parse back (which could trap).
                let scale = pow(10.0, Double(accuracy))
                double = (double * scale).rounded() / scale
            }

            // Scale up until no fractional part remains. Any Double is
            // integer-valued once its magnitude exceeds 2^53, so this
            // terminates well before `exp` can overflow Int.
            while (double * Double(exp)).remainder(dividingBy: 1) != 0 {
                exp *= 10
            }

            if double >= 0 {
                simplifier(Int(double * Double(exp)), exp, "+")
            }
            else {
                simplifier(Int(double * Double(exp)), exp, "-")
            }
        }

        /// Greatest common divisor via the Euclidean algorithm.
        /// The previous trial-division version crashed for inputs < 2
        /// (the `2...a` range is invalid when a < 2, e.g. Rational(0, 5))
        /// and ran in O(min(a, b)) instead of O(log min(a, b)).
        func gcd(_ alpha: Int, _ beta: Int) -> Int {
            var a = alpha
            var b = beta
            while b != 0 {
                (a, b) = (b, a % b)
            }
            return a
        }

        /// Reduces alpha/beta to lowest terms and applies the sign carried
        /// by `posOrNeg` ("+" or "-") to the numerator.
        func simplifier(_ alpha: Int, _ beta: Int, _ posOrNeg: String) {
            let alpha = alpha > 0 ? alpha : -alpha
            let beta = beta > 0 ? beta : -beta

            // gcd(0, 0) would be 0; clamp so 0/0 becomes 0/1 instead of
            // dividing by zero.
            let greatestCommonDivisor = Swift.max(gcd(alpha, beta), 1)

            self.alpha = posOrNeg == "+" ? alpha / greatestCommonDivisor : -alpha / greatestCommonDivisor
            // Keep the denominator at least 1 so n/0 cannot be stored.
            self.beta = Swift.max(beta / greatestCommonDivisor, 1)
        }

    }
    
    typealias Rnl = Rational

    /// Multiplies two rationals; the Rational initializer reduces the
    /// product to lowest terms.
    func *(a: Rational, b: Rational) -> Rational {
        return Rational(a.alpha * b.alpha, a.beta * b.beta)
    }
    
    /// Divides two rationals by cross-multiplying; the Rational
    /// initializer reduces the quotient to lowest terms.
    func /(a: Rational, b: Rational) -> Rational {
        return Rational(a.alpha * b.beta, a.beta * b.alpha)
    }
    
    /// Adds two rationals over the common denominator a.beta * b.beta;
    /// the Rational initializer reduces the sum to lowest terms.
    func +(a: Rational, b: Rational) -> Rational {
        let numerator = a.alpha * b.beta + a.beta * b.alpha
        let denominator = a.beta * b.beta
        return Rational(numerator, denominator)
    }
    
    /// Subtracts two rationals over the common denominator a.beta * b.beta;
    /// the Rational initializer reduces the difference to lowest terms.
    func -(a: Rational, b: Rational) -> Rational {
        let numerator = a.alpha * b.beta - a.beta * b.alpha
        let denominator = a.beta * b.beta
        return Rational(numerator, denominator)
    }
    
    extension Rational {

        /// Floating-point value of the fraction (numerator divided by
        /// denominator).
        func value() -> Double {
            return Double(alpha) / Double(beta)
        }

    }
    
    extension Rational {

        /// Human-readable form: whole numbers render as "n", zero as "0",
        /// everything else as "numerator / denominator".
        func rnlValue() -> String {
            switch (self.alpha, self.beta) {
            case (_, 1):
                return "\(self.alpha)"
            case (0, _):
                return "0"
            default:
                return "\(self.alpha) / \(self.beta)"
            }
        }

    }
    
    // examples:

    // The integer initializer reduces to lowest terms: 120/45 -> 8/3, 36/88 -> 9/22.
    let first = Rnl(120,45)
    let second = Rnl(36,88)
    // The double initializer; `accuracy` rounds to that many decimal places first.
    let third = Rnl(2.33435, accuracy: 2)
    let forth = Rnl(2.33435)

    print(first.alpha, first.beta, first.value(), first.rnlValue()) // prints  8   3   2.6666666666666665   8 / 3
    print((first*second).rnlValue()) // prints  12 / 11
    print((first+second).rnlValue()) // prints  203 / 66
    print(third.value(), forth.value()) // prints  2.33   2.33435
    
    

    First of all, we have the class itself. the class can be initialised in two ways:

    in the Rational class, alpha ~= nominator & beta ~= denominator

    The first way is initialising the class using two integers, the first of which is the numerator and the second the denominator. The class gets those two integers, and then reduces them to the smallest numbers possible, e.g. it reduces (10,5) to (2,1), or, as another example, reduces (144,60) to (12,5). This way, the simplest numbers are always stored. This is possible using the gcd (greatest common divisor) function and the simplifier function, which are not hard to understand from the code. The only thing is that the class faces some issues with negative numbers, so it always saves whether the final rational number is negative or positive, and if it's negative it makes the numerator negative.

    The Second way to initialise the class, is with a double, and with an optional parameter called 'accuracy'. the class gets the double, and also the accuracy of how much numbers after decimal point you need, and converts the double to a nominator/denominator form, in which the denominator is of power of 10. e.g 2.334 will be 2334/1000 or 342.57 will be 34257/100. then tries to simplify the rational numbers using the same method which was explained in the #1 way.

    After the class definition, there is type-alias 'Rnl', which you can obviously change it as you wish.

    Then there are 4 functions, for the 4 main actions of math: * / + -, which i defined so e.g. you can easily multiply two numbers of type Rational.

    After that, there are 2 extensions to the Rational type, the first of which ('value') gives you the double value of a Rational number, while the second one ('rnlValue') gives you the Rational number in the form of a human-readable string: "numerator / denominator".

    At last, you can see some examples of how all these work.

    0 讨论(0)
  • 2020-11-30 10:00

    If you want to display the results of calculations as rational numbers then the only 100% correct solution is to use rational arithmetic throughout all calculations, i.e. all intermediate values are stored as a pair of integers (numerator, denominator), and all additions, multiplications, divisions, etc are done using the rules for rational numbers.

    As soon as a result is assigned to a binary floating point number such as Double, information is lost. For example,

    let x : Double = 7/10
    

    stores in x an approximation of 0.7, because that number cannot be represented exactly as a Double. From

    print(String(format:"%a", x)) // 0x1.6666666666666p-1
    

    one can see that x holds the value

    0x16666666666666 * 2^(-53) = 6305039478318694 / 9007199254740992
                               ≈ 0.69999999999999995559107901499373838305
    

    So a correct representation of x as a rational number would be 6305039478318694 / 9007199254740992, but that is of course not what you expect. What you expect is 7/10, but there is another problem:

    let x : Double = 69999999999999996/100000000000000000
    

    assigns exactly the same value to x, it is indistinguishable from 0.7 within the precision of a Double.

    So should x be displayed as 7/10 or as 69999999999999996/100000000000000000 ?

    As said above, using rational arithmetic would be the perfect solution. If that is not viable, then you can convert the Double back to a rational number with a given precision. (The following is taken from Algorithm for LCM of doubles in Swift.)

    Continued Fractions are an efficient method to create a (finite or infinite) sequence of fractions hn/kn that are arbitrary good approximations to a given real number x, and here is a possible implementation in Swift:

    typealias Rational = (num : Int, den : Int)

    /// Best rational approximation of `x0`, found by expanding its
    /// continued fraction and stopping once the current convergent
    /// num/den is within roughly `eps` of the target.
    func rationalApproximationOf(x0 : Double, withPrecision eps : Double = 1.0E-6) -> Rational {
        var remainder = x0
        var term = floor(remainder)
        var (previousNum, previousDen) = (1, 0)
        var (num, den) = (Int(term), 1)

        while remainder - term > eps * Double(den) * Double(den) {
            remainder = 1.0 / (remainder - term)
            term = floor(remainder)
            (previousNum, previousDen, num, den) =
                (num, den, previousNum + Int(term) * num, previousDen + Int(term) * den)
        }
        return (num, den)
    }
    

    Examples:

    rationalApproximationOf(0.333333) // (1, 3)
    rationalApproximationOf(0.25)     // (1, 4)
    rationalApproximationOf(0.1764705882) // (3, 17)
    

    The default precision is 1.0E-6, but you can adjust that to your needs:

    rationalApproximationOf(0.142857) // (1, 7)
    rationalApproximationOf(0.142857, withPrecision: 1.0E-10) // (142857, 1000000)
    
    rationalApproximationOf(M_PI) // (355, 113)
    rationalApproximationOf(M_PI, withPrecision: 1.0E-7) // (103993, 33102)
    rationalApproximationOf(M_PI, withPrecision: 1.0E-10) // (312689, 99532)
    

    Swift 3 version:

    typealias Rational = (num : Int, den : Int)

    /// Continued-fraction approximation of `x0` as a fraction num/den,
    /// accurate to roughly `eps`. Swift 3 spelling of the algorithm above.
    func rationalApproximation(of x0 : Double, withPrecision eps : Double = 1.0E-6) -> Rational {
        var fractionalPart = x0
        var integerPart = fractionalPart.rounded(.down)
        var (h1, k1) = (1, 0)
        var (h, k) = (Int(integerPart), 1)

        while fractionalPart - integerPart > eps * Double(k) * Double(k) {
            fractionalPart = 1.0 / (fractionalPart - integerPart)
            integerPart = fractionalPart.rounded(.down)
            (h1, k1, h, k) = (h, k, h1 + Int(integerPart) * h, k1 + Int(integerPart) * k)
        }
        return (h, k)
    }
    

    Examples:

    rationalApproximation(of: 0.333333) // (1, 3)
    rationalApproximation(of: 0.142857, withPrecision: 1.0E-10) // (142857, 1000000)
    

    Or – as suggested by @brandonscript – with a struct Rational and an initializer:

    struct Rational {
        let numerator : Int
        let denominator: Int

        /// Memberwise initializer, written out explicitly so it coexists
        /// with the approximating initializer below.
        init(numerator: Int, denominator: Int) {
            self.numerator = numerator
            self.denominator = denominator
        }

        /// Builds the continued-fraction approximation of `x0` that is
        /// accurate to roughly `eps`.
        init(approximating x0: Double, withPrecision eps: Double = 1.0E-6) {
            var value = x0
            var wholePart = value.rounded(.down)
            var (h1, k1) = (1, 0)
            var (h, k) = (Int(wholePart), 1)

            while value - wholePart > eps * Double(k) * Double(k) {
                value = 1.0 / (value - wholePart)
                wholePart = value.rounded(.down)
                (h1, k1, h, k) = (h, k, h1 + Int(wholePart) * h, k1 + Int(wholePart) * k)
            }
            self.init(numerator: h, denominator: k)
        }
    }
    

    Example usage:

    print(Rational(approximating: 0.333333))
    // Rational(numerator: 1, denominator: 3)
    
    print(Rational(approximating: .pi, withPrecision: 1.0E-7))
    // Rational(numerator: 103993, denominator: 33102)
    
    0 讨论(0)
提交回复
热议问题